Copy-paste-ready code snippets for your projects. Filter by language or search.
Express session middleware configured with Redis store for scalable, persistent sessions across multiple server instances.
const session = require('express-session');
const RedisStore = require('connect-redis').default;
const { createClient } = require('redis');
async function setupSession(app) {
const redisClient = createClient({
url: process.env.REDIS_URL || 'redis://localhost:6379',
socket: {
reconnectStrategy: (retries) => {
if (retries > 10) return new Error('Redis max retries reached');
return Math.min(retries * 100, 3000);
}
}
});
redisClient.on('error', (err) => console.error('Redis error:', err.message));
redisClient.on('connect', () => console.log('Redis connected'));
await redisClient.connect();
const store = new RedisStore({
client: redisClient,
prefix: 'sess:',
ttl: 86400, // 24 hours
});
app.use(session({
store,
secret: process.env.SESSION_SECRET,
resave: false,
saveUninitialized: false,
name: 'sid',
cookie: {
secure: process.env.NODE_ENV === 'production',
httpOnly: true,
maxAge: 1000 * 60 * 60 * 24, // 24 hours
sameSite: 'lax',
domain: process.env.COOKIE_DOMAIN || undefined,
},
rolling: true, // Reset expiry on each request
}));
// Graceful shutdown
process.on('SIGTERM', async () => {
await redisClient.quit();
console.log('Redis connection closed');
});
return redisClient;
}
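// Usage (a sketch; assumes REDIS_URL and SESSION_SECRET are set in the environment):
// const express = require('express');
// const setupSession = require('./session');
// const app = express();
// setupSession(app).then(() => app.listen(3000));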
module.exports = setupSession;

Socket.io event handler with room management, authentication, rate limiting, and structured events for real-time features.
const { Server } = require('socket.io');
const jwt = require('jsonwebtoken');
function setupWebSocket(httpServer) {
const io = new Server(httpServer, {
cors: { origin: process.env.CLIENT_URL || '*', methods: ['GET', 'POST'] },
pingInterval: 25000,
pingTimeout: 60000,
maxHttpBufferSize: 1e6, // 1MB
});
// Authentication middleware
io.use((socket, next) => {
const token = socket.handshake.auth.token;
if (!token) return next(new Error('Authentication required'));
try {
const user = jwt.verify(token, process.env.JWT_SECRET);
socket.user = user;
next();
} catch (err) {
next(new Error('Invalid token'));
}
});
// Rate limiting per socket
const rateLimits = new Map();
function checkRate(socketId, limit = 30, windowMs = 60000) {
const now = Date.now();
const record = rateLimits.get(socketId) || { count: 0, resetAt: now + windowMs };
if (now > record.resetAt) { record.count = 0; record.resetAt = now + windowMs; }
record.count++;
rateLimits.set(socketId, record);
return record.count <= limit;
}
io.on('connection', (socket) => {
console.log(`User connected: ${socket.user.id}`);
// Join user's personal room
socket.join(`user:${socket.user.id}`);
// Chat room management
socket.on('join-room', (roomId) => {
socket.join(roomId);
socket.to(roomId).emit('user-joined', { userId: socket.user.id });
});
socket.on('leave-room', (roomId) => {
socket.leave(roomId);
socket.to(roomId).emit('user-left', { userId: socket.user.id });
});
// Message handling with rate limiting
socket.on('message', (data) => {
if (!checkRate(socket.id)) {
return socket.emit('error', { message: 'Rate limit exceeded' });
}
const { roomId, content } = data;
io.to(roomId).emit('message', {
userId: socket.user.id,
content,
timestamp: new Date().toISOString(),
});
});
socket.on('disconnect', () => {
rateLimits.delete(socket.id);
console.log(`User disconnected: ${socket.user.id}`);
});
});
return io;
}
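// Usage (a sketch): attach to your HTTP server, then authenticate from the client
// const httpServer = require('http').createServer(app);
// const io = setupWebSocket(httpServer);
// httpServer.listen(3000);
// Client side (socket.io-client): io(url, { auth: { token } }).emit('join-room', 'room-1');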
module.exports = setupWebSocket;

Node.js cron job manager with job registration, error handling, execution logging, and graceful shutdown.
const cron = require('node-cron');
class CronManager {
constructor() {
this.jobs = new Map();
this.logs = [];
}
register(name, schedule, handler, options = {}) {
if (this.jobs.has(name)) {
console.warn(`Cron job '${name}' already registered. Skipping.`);
return this;
}
const { timezone, runOnInit = false, enabled = true } = options;
if (!cron.validate(schedule)) {
throw new Error(`Invalid cron schedule for '${name}': ${schedule}`);
}
const wrappedHandler = async () => {
const start = Date.now();
const logEntry = { name, startedAt: new Date(), status: 'running' };
try {
console.log(`[CRON] Running: ${name}`);
await handler();
logEntry.status = 'success';
logEntry.duration = Date.now() - start;
console.log(`[CRON] Completed: ${name} (${logEntry.duration}ms)`);
} catch (err) {
logEntry.status = 'failed';
logEntry.error = err.message;
logEntry.duration = Date.now() - start;
console.error(`[CRON] Failed: ${name} -`, err.message);
}
this.logs.push(logEntry);
if (this.logs.length > 1000) this.logs.shift();
};
const task = cron.schedule(schedule, wrappedHandler, {
scheduled: enabled,
timezone,
});
this.jobs.set(name, { task, schedule, handler: wrappedHandler, options });
if (runOnInit) wrappedHandler();
console.log(`[CRON] Registered: ${name} (${schedule})`);
return this;
}
start(name) {
const job = this.jobs.get(name);
if (job) job.task.start();
return this;
}
stop(name) {
const job = this.jobs.get(name);
if (job) job.task.stop();
return this;
}
stopAll() {
for (const [name, { task }] of this.jobs) {
task.stop();
console.log(`[CRON] Stopped: ${name}`);
}
}
getStatus() {
const status = {};
for (const [name, { schedule }] of this.jobs) {
const lastLog = [...this.logs].reverse().find(l => l.name === name);
status[name] = { schedule, lastRun: lastLog || null };
}
return status;
}
getLogs(name, limit = 50) {
const filtered = name ? this.logs.filter(l => l.name === name) : this.logs;
return filtered.slice(-limit);
}
}
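// Usage (a sketch; job name, schedule, and handler are illustrative):
// const cronManager = require('./cron-manager');
// cronManager.register('cleanup-temp', '0 3 * * *', async () => { /* ... */ }, { timezone: 'UTC' });
// process.on('SIGTERM', () => cronManager.stopAll());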
module.exports = new CronManager();

Production-ready Docker Compose configuration for a Node.js app with MongoDB, Redis, and an Nginx reverse proxy.
# docker-compose.yml - Full Stack Production Setup
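# A typical launch (assumes a multi-stage Dockerfile with a 'production' target
# and an nginx.conf that proxies to app:3000):
#   docker compose up -d --build
#   docker compose logs -f app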
version: '3.8'
services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
      target: production
    restart: unless-stopped
    environment:
      NODE_ENV: production
      MONGO_URI: mongodb://mongo:27017/myapp
      REDIS_URL: redis://redis:6379
      PORT: 3000
    depends_on:
      mongo:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - backend
    deploy:
      replicas: 2
      resources:
        limits:
          memory: 512M
          cpus: '0.5'
  nginx:
    image: nginx:alpine
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/ssl:/etc/nginx/ssl:ro
      - ./public:/var/www/public:ro
    depends_on:
      - app
    restart: unless-stopped
    networks:
      - backend
  mongo:
    image: mongo:7
    restart: unless-stopped
    volumes:
      - mongo_data:/data/db
      - ./backups:/backups
    environment:
      MONGO_INITDB_DATABASE: myapp
    healthcheck:
      test: mongosh --eval "db.adminCommand('ping')"
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - backend
  redis:
    image: redis:7-alpine
    restart: unless-stopped
    command: redis-server --appendonly yes --maxmemory 128mb --maxmemory-policy allkeys-lru
    volumes:
      - redis_data:/data
    healthcheck:
      test: redis-cli ping
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - backend
volumes:
  mongo_data:
  redis_data:
networks:
  backend:
    driver: bridge

Type-safe environment variable loader with validation, defaults, required field checks, and .env file parsing.
const fs = require('fs');
const path = require('path');
class EnvLoader {
constructor(envPath = '.env') {
this.vars = {};
this.errors = [];
this._loadFile(envPath);
}
_loadFile(envPath) {
const fullPath = path.resolve(process.cwd(), envPath);
if (!fs.existsSync(fullPath)) return;
const content = fs.readFileSync(fullPath, 'utf8');
content.split('\n').forEach(line => {
line = line.trim();
if (!line || line.startsWith('#')) return;
const idx = line.indexOf('=');
if (idx === -1) return;
const key = line.slice(0, idx).trim();
let value = line.slice(idx + 1).trim();
// Remove surrounding quotes
if ((value.startsWith('"') && value.endsWith('"')) ||
(value.startsWith("'") && value.endsWith("'"))) {
value = value.slice(1, -1);
}
if (!process.env[key]) process.env[key] = value;
});
}
string(key, defaultValue) {
const val = process.env[key];
if (val !== undefined) { this.vars[key] = val; return val; }
if (defaultValue !== undefined) { this.vars[key] = defaultValue; return defaultValue; }
this.errors.push(`Missing required env var: ${key}`);
return '';
}
number(key, defaultValue) {
const raw = process.env[key];
if (raw !== undefined) {
const num = Number(raw);
if (isNaN(num)) { this.errors.push(`${key} must be a number, got: ${raw}`); return 0; }
this.vars[key] = num; return num;
}
if (defaultValue !== undefined) { this.vars[key] = defaultValue; return defaultValue; }
this.errors.push(`Missing required env var: ${key}`);
return 0;
}
boolean(key, defaultValue = false) {
const raw = process.env[key];
if (raw !== undefined) {
const val = ['true', '1', 'yes'].includes(raw.toLowerCase());
this.vars[key] = val; return val;
}
this.vars[key] = defaultValue; return defaultValue;
}
required(key) {
const val = process.env[key];
if (!val) this.errors.push(`Missing required env var: ${key}`);
this.vars[key] = val || '';
return val || '';
}
validate() {
if (this.errors.length > 0) {
console.error('Environment validation failed:');
this.errors.forEach(e => console.error(` - ${e}`));
process.exit(1);
}
return this.vars;
}
}
// Usage:
// const env = new EnvLoader();
// const config = {
// port: env.number('PORT', 3000),
// dbUrl: env.required('MONGO_URI'),
// debug: env.boolean('DEBUG', false),
// secret: env.required('JWT_SECRET'),
// };
// env.validate();
module.exports = EnvLoader;

Reusable MongoDB pagination helper with sorting, filtering, search, and metadata for building paginated API responses.
/**
* Paginate a Mongoose model with filtering, sorting, and search
* @param {Model} model - Mongoose model
* @param {Object} options - Pagination options
* @returns {Object} - Paginated results with metadata
*/
async function paginate(model, options = {}) {
const {
page = 1,
limit = 20,
sort = '-createdAt',
filter = {},
search = '',
searchFields = [],
select = '',
populate = '',
} = options;
const pageNum = Math.max(1, parseInt(page));
const limitNum = Math.min(100, Math.max(1, parseInt(limit)));
const skip = (pageNum - 1) * limitNum;
// Build query
const query = { ...filter };
// Add text search if provided
if (search && searchFields.length > 0) {
query.$or = searchFields.map(field => ({
[field]: { $regex: search, $options: 'i' }
}));
}
// Parse sort string (e.g., '-createdAt,title' => { createdAt: -1, title: 1 })
const sortObj = {};
sort.split(',').forEach(field => {
const trimmed = field.trim();
if (trimmed.startsWith('-')) {
sortObj[trimmed.slice(1)] = -1;
} else {
sortObj[trimmed] = 1;
}
});
// Execute query and count in parallel
// Only apply populate when a path is given; calling populate('') with the
// empty-string default is not a valid Mongoose path
let findQuery = model.find(query)
.sort(sortObj)
.skip(skip)
.limit(limitNum)
.select(select)
.lean();
if (populate) findQuery = findQuery.populate(populate);
const [docs, totalDocs] = await Promise.all([
findQuery,
model.countDocuments(query)
]);
const totalPages = Math.ceil(totalDocs / limitNum);
return {
data: docs,
pagination: {
page: pageNum,
limit: limitNum,
totalDocs,
totalPages,
hasNextPage: pageNum < totalPages,
hasPrevPage: pageNum > 1,
nextPage: pageNum < totalPages ? pageNum + 1 : null,
prevPage: pageNum > 1 ? pageNum - 1 : null,
}
};
}
// Usage in a controller:
// const result = await paginate(Article, {
// page: req.query.page,
// limit: req.query.limit,
// sort: '-publishedAt',
// filter: { status: 'published' },
// search: req.query.q,
// searchFields: ['title', 'description'],
// populate: 'author',
// });
// res.json(result);
module.exports = paginate;

Comprehensive Zod validation schemas for user registration, API request bodies, environment variables, and nested objects.
import { z } from 'zod';
// User registration schema with custom validators
const userRegistrationSchema = z.object({
name: z.string().min(2, 'Name must be at least 2 characters').max(100),
email: z.string().email('Invalid email format').toLowerCase(),
password: z.string()
.min(8, 'Password must be at least 8 characters')
.regex(/[A-Z]/, 'Must contain an uppercase letter')
.regex(/[0-9]/, 'Must contain a number')
.regex(/[^A-Za-z0-9]/, 'Must contain a special character'),
confirmPassword: z.string(),
role: z.enum(['user', 'admin', 'moderator']).default('user'),
profile: z.object({
bio: z.string().max(500).optional(),
website: z.string().url().optional().or(z.literal('')),
avatar: z.string().url().optional(),
}).optional(),
tags: z.array(z.string()).max(10).default([]),
acceptTerms: z.literal(true, {
errorMap: () => ({ message: 'You must accept the terms' }),
}),
}).refine(data => data.password === data.confirmPassword, {
message: 'Passwords do not match',
path: ['confirmPassword'],
});
// Environment variables validation
const envSchema = z.object({
NODE_ENV: z.enum(['development', 'production', 'test']).default('development'),
PORT: z.coerce.number().int().min(1).max(65535).default(3000),
DATABASE_URL: z.string().url(),
JWT_SECRET: z.string().min(32),
REDIS_URL: z.string().url().optional(),
SMTP_HOST: z.string().optional(),
SMTP_PORT: z.coerce.number().optional(),
LOG_LEVEL: z.enum(['debug', 'info', 'warn', 'error']).default('info'),
});
// API pagination query schema
const paginationSchema = z.object({
page: z.coerce.number().int().min(1).default(1),
limit: z.coerce.number().int().min(1).max(100).default(20),
sort: z.string().optional(),
order: z.enum(['asc', 'desc']).default('desc'),
search: z.string().max(200).optional(),
});
// Helper: validate and extract typed data
function validate<T>(schema: z.ZodSchema<T>, data: unknown): T {
const result = schema.safeParse(data);
if (!result.success) {
const errors = result.error.errors.map(e => `${e.path.join('.')}: ${e.message}`);
throw new Error(`Validation failed: ${errors.join(', ')}`);
}
return result.data;
}
type UserRegistration = z.infer<typeof userRegistrationSchema>;
type Env = z.infer<typeof envSchema>;
type Pagination = z.infer<typeof paginationSchema>;
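// Usage (a sketch; `req` is assumed to be an Express request):
// const user = validate(userRegistrationSchema, req.body);
// const env = validate(envSchema, process.env);
// const { page, limit } = validate(paginationSchema, req.query);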
export { userRegistrationSchema, envSchema, paginationSchema, validate };

Collection of reusable React hooks: useDebounce, useLocalStorage, useFetch, useIntersectionObserver, and useMediaQuery.
import { useState, useEffect, useRef, useCallback } from 'react';
// Debounce hook - delays value updates
function useDebounce<T>(value: T, delay: number = 300): T {
const [debounced, setDebounced] = useState(value);
useEffect(() => {
const timer = setTimeout(() => setDebounced(value), delay);
return () => clearTimeout(timer);
}, [value, delay]);
return debounced;
}
// LocalStorage hook with SSR safety
function useLocalStorage<T>(key: string, initialValue: T): [T, (value: T | ((prev: T) => T)) => void] {
const [stored, setStored] = useState<T>(() => {
if (typeof window === 'undefined') return initialValue;
try {
const item = window.localStorage.getItem(key);
return item ? JSON.parse(item) : initialValue;
} catch { return initialValue; }
});
const setValue = useCallback((value: T | ((prev: T) => T)) => {
setStored(prev => {
const next = value instanceof Function ? value(prev) : value;
window.localStorage.setItem(key, JSON.stringify(next));
return next;
});
}, [key]);
return [stored, setValue];
}
// Fetch hook with loading/error states
function useFetch<T>(url: string, options?: RequestInit) {
const [data, setData] = useState<T | null>(null);
const [loading, setLoading] = useState(true);
const [error, setError] = useState<string | null>(null);
useEffect(() => {
const controller = new AbortController();
setLoading(true);
fetch(url, { ...options, signal: controller.signal })
.then(res => { if (!res.ok) throw new Error(res.statusText); return res.json(); })
.then(setData)
.catch(err => { if (err.name !== 'AbortError') setError(err.message); })
.finally(() => setLoading(false));
return () => controller.abort();
}, [url]);
return { data, loading, error };
}
// Intersection Observer hook for lazy loading
function useIntersectionObserver(options?: IntersectionObserverInit) {
const [entry, setEntry] = useState<IntersectionObserverEntry | null>(null);
const ref = useRef<HTMLElement | null>(null);
useEffect(() => {
if (!ref.current) return;
const observer = new IntersectionObserver(([e]) => setEntry(e), options);
observer.observe(ref.current);
return () => observer.disconnect();
}, [ref.current, options?.threshold, options?.rootMargin]);
return { ref, entry, isVisible: !!entry?.isIntersecting };
}
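// useMediaQuery, named in the description above; a minimal sketch with an SSR
// guard (matchMedia does not exist on the server)
function useMediaQuery(query: string): boolean {
  const [matches, setMatches] = useState<boolean>(() =>
    typeof window !== 'undefined' ? window.matchMedia(query).matches : false
  );
  useEffect(() => {
    const mql = window.matchMedia(query);
    const handler = (e: MediaQueryListEvent) => setMatches(e.matches);
    mql.addEventListener('change', handler);
    setMatches(mql.matches); // sync in case the query changed between renders
    return () => mql.removeEventListener('change', handler);
  }, [query]);
  return matches;
}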
export { useDebounce, useLocalStorage, useFetch, useIntersectionObserver, useMediaQuery };

Complete dark/light mode implementation using CSS custom properties with smooth transitions and system preference detection.
/* Dark Mode Toggle using CSS Custom Properties */
:root {
--bg-primary: #0a0d12;
--bg-secondary: #0f1218;
--bg-card: rgba(15, 18, 24, 0.6);
--text-primary: #ffffff;
--text-secondary: #a0aabf;
--text-muted: #555;
--accent: #00e1ff;
--accent-glow: rgba(0, 225, 255, 0.15);
--border: rgba(255, 255, 255, 0.06);
--shadow: 0 8px 32px rgba(0, 0, 0, 0.4);
--transition: 0.3s cubic-bezier(0.4, 0, 0.2, 1);
}
[data-theme="light"] {
--bg-primary: #ffffff;
--bg-secondary: #f8f9fa;
--bg-card: rgba(255, 255, 255, 0.8);
--text-primary: #1a1a2e;
--text-secondary: #555;
--text-muted: #888;
--accent: #0066cc;
--accent-glow: rgba(0, 102, 204, 0.1);
--border: rgba(0, 0, 0, 0.08);
--shadow: 0 8px 32px rgba(0, 0, 0, 0.08);
}
body {
background: var(--bg-primary);
color: var(--text-primary);
transition: background var(--transition), color var(--transition);
}
.card {
background: var(--bg-card);
border: 1px solid var(--border);
box-shadow: var(--shadow);
transition: all var(--transition);
}
/* Theme toggle button */
.theme-toggle {
position: relative;
width: 56px; height: 28px;
border-radius: 14px;
background: var(--bg-secondary);
border: 1px solid var(--border);
cursor: pointer;
transition: all var(--transition);
}
.theme-toggle::after {
content: '';
position: absolute;
top: 3px; left: 3px;
width: 20px; height: 20px;
border-radius: 50%;
background: var(--accent);
transition: transform var(--transition);
}
[data-theme="light"] .theme-toggle::after {
transform: translateX(28px);
}
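/* Theme switching needs a few lines of JS (a sketch; persists the choice in
   localStorage and falls back to the markup default):
   const saved = localStorage.getItem('theme');
   if (saved) document.documentElement.dataset.theme = saved;
   document.querySelector('.theme-toggle').addEventListener('click', () => {
     const next = document.documentElement.dataset.theme === 'light' ? 'dark' : 'light';
     document.documentElement.dataset.theme = next;
     localStorage.setItem('theme', next);
   });
*/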
/* Respect system preference */
@media (prefers-color-scheme: light) {
:root:not([data-theme]) {
--bg-primary: #ffffff;
--bg-secondary: #f8f9fa;
--text-primary: #1a1a2e;
}
}

Modern glassmorphism card design with backdrop blur, gradient borders, hover animations, and responsive layout.
/* Glassmorphism Card Component */
.glass-card {
position: relative;
background: rgba(255, 255, 255, 0.03);
backdrop-filter: blur(20px);
-webkit-backdrop-filter: blur(20px);
border-radius: 20px;
border: 1px solid rgba(255, 255, 255, 0.08);
padding: 32px;
overflow: hidden;
transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1);
}
/* Gradient border effect */
.glass-card::before {
content: '';
position: absolute;
inset: 0;
border-radius: 20px;
padding: 1px;
background: linear-gradient(
135deg,
rgba(0, 225, 255, 0.3),
transparent 40%,
transparent 60%,
rgba(167, 139, 250, 0.3)
);
-webkit-mask: linear-gradient(#fff 0 0) content-box, linear-gradient(#fff 0 0);
mask: linear-gradient(#fff 0 0) content-box, linear-gradient(#fff 0 0);
-webkit-mask-composite: xor;
mask-composite: exclude;
opacity: 0;
transition: opacity 0.4s ease;
}
.glass-card:hover::before { opacity: 1; }
.glass-card:hover {
transform: translateY(-8px);
box-shadow:
0 20px 60px rgba(0, 0, 0, 0.3),
0 0 40px rgba(0, 225, 255, 0.05);
}
/* Inner glow effect */
.glass-card::after {
content: '';
position: absolute;
top: -50%; left: -50%;
width: 200%; height: 200%;
background: radial-gradient(
circle at var(--mouse-x, 50%) var(--mouse-y, 50%),
rgba(0, 225, 255, 0.06) 0%,
transparent 50%
);
pointer-events: none;
opacity: 0;
transition: opacity 0.3s;
}
.glass-card:hover::after { opacity: 1; }
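/* The inner glow tracks the cursor through --mouse-x / --mouse-y, which must be
   set from JS (a sketch; coordinates are relative to the card):
   document.querySelectorAll('.glass-card').forEach(card => {
     card.addEventListener('mousemove', (e) => {
       const r = card.getBoundingClientRect();
       card.style.setProperty('--mouse-x', `${e.clientX - r.left}px`);
       card.style.setProperty('--mouse-y', `${e.clientY - r.top}px`);
     });
   });
*/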
/* Responsive grid layout */
.glass-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(320px, 1fr));
gap: 24px;
padding: 20px 0;
}
@media (max-width: 640px) {
.glass-grid { grid-template-columns: 1fr; }
.glass-card { padding: 24px; border-radius: 16px; }
}

Flexible CSS grid system with auto-fit columns, named areas, responsive breakpoints, and utility classes.
/* Responsive Grid System with CSS Grid */
.grid {
display: grid;
gap: var(--grid-gap, 24px);
width: 100%;
}
/* Auto-fit columns */
.grid-auto { grid-template-columns: repeat(auto-fit, minmax(var(--col-min, 280px), 1fr)); }
.grid-2 { grid-template-columns: repeat(2, 1fr); }
.grid-3 { grid-template-columns: repeat(3, 1fr); }
.grid-4 { grid-template-columns: repeat(4, 1fr); }
/* Gap utilities */
.gap-sm { --grid-gap: 12px; }
.gap-md { --grid-gap: 24px; }
.gap-lg { --grid-gap: 40px; }
/* Span utilities */
.col-span-2 { grid-column: span 2; }
.col-span-3 { grid-column: span 3; }
.col-span-full { grid-column: 1 / -1; }
.row-span-2 { grid-row: span 2; }
/* Dashboard layout with named areas */
.grid-dashboard {
grid-template-columns: 250px 1fr 300px;
grid-template-rows: auto 1fr auto;
grid-template-areas:
"sidebar header header"
"sidebar main aside"
"sidebar footer footer";
min-height: 100vh;
}
.grid-dashboard > .sidebar { grid-area: sidebar; }
.grid-dashboard > .header { grid-area: header; }
.grid-dashboard > .main { grid-area: main; }
.grid-dashboard > .aside { grid-area: aside; }
.grid-dashboard > .footer { grid-area: footer; }
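/* Markup sketch for the dashboard layout (class names match the areas above):
   <div class="grid grid-dashboard">
     <nav class="sidebar">...</nav>
     <header class="header">...</header>
     <main class="main">...</main>
     <aside class="aside">...</aside>
     <footer class="footer">...</footer>
   </div>
*/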
/* Responsive breakpoints */
@media (max-width: 1024px) {
.grid-3, .grid-4 { grid-template-columns: repeat(2, 1fr); }
.grid-dashboard {
grid-template-columns: 1fr;
grid-template-areas: "header" "main" "aside" "footer";
}
.grid-dashboard > .sidebar { display: none; }
}
@media (max-width: 640px) {
.grid-2, .grid-3, .grid-4 { grid-template-columns: 1fr; }
.col-span-2, .col-span-3 { grid-column: span 1; }
.grid { --grid-gap: 16px; }
}
/* Alignment utilities */
.items-center { align-items: center; }
.items-start { align-items: start; }
.justify-center { justify-items: center; }
.place-center { place-items: center; }

Cross-browser custom scrollbar styling with thin/overlay variants, Firefox support, and dark theme optimization.
/* Custom Scrollbar Styles - Cross Browser */
/* === Webkit (Chrome, Safari, Edge) === */
::-webkit-scrollbar {
width: 8px;
height: 8px;
}
::-webkit-scrollbar-track {
background: transparent;
}
::-webkit-scrollbar-thumb {
background: rgba(255, 255, 255, 0.1);
border-radius: 4px;
border: 2px solid transparent;
background-clip: padding-box;
transition: background 0.3s;
}
::-webkit-scrollbar-thumb:hover {
background: rgba(0, 225, 255, 0.3);
border: 2px solid transparent;
background-clip: padding-box;
}
::-webkit-scrollbar-corner {
background: transparent;
}
/* === Firefox === */
* {
scrollbar-width: thin;
scrollbar-color: rgba(255, 255, 255, 0.1) transparent;
}
/* Thin variant for nested containers */
.scroll-thin::-webkit-scrollbar {
width: 4px;
height: 4px;
}
.scroll-thin {
scrollbar-width: thin;
}
/* Auto-hide scrollbar (overlay style) */
.scroll-overlay {
overflow: auto;
}
.scroll-overlay::-webkit-scrollbar {
width: 6px;
}
.scroll-overlay::-webkit-scrollbar-thumb {
background: transparent;
transition: background 0.3s;
}
.scroll-overlay:hover::-webkit-scrollbar-thumb {
background: rgba(255, 255, 255, 0.15);
}
/* Horizontal scroll container */
.scroll-horizontal {
overflow-x: auto;
overflow-y: hidden;
display: flex;
gap: 16px;
padding-bottom: 8px;
scroll-snap-type: x mandatory;
-webkit-overflow-scrolling: touch;
}
.scroll-horizontal > * {
scroll-snap-align: start;
flex-shrink: 0;
}
/* Hide scrollbar completely (for touch/carousel) */
.scroll-hidden {
overflow: auto;
-ms-overflow-style: none;
scrollbar-width: none;
}
.scroll-hidden::-webkit-scrollbar {
display: none;
}
/* Code block scrollbar accent */
pre::-webkit-scrollbar-thumb,
code::-webkit-scrollbar-thumb {
background: rgba(0, 225, 255, 0.15);
}
pre::-webkit-scrollbar-thumb:hover,
code::-webkit-scrollbar-thumb:hover {
background: rgba(0, 225, 255, 0.3);
}

Comprehensive Docker cleanup script that removes stopped containers, dangling images, unused volumes, and old build cache.
#!/bin/bash
# Docker Cleanup Script - removes unused resources safely
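# Usage: ./docker-cleanup.sh
# Suggested weekly cron entry (path is illustrative):
#   0 4 * * 0 /opt/scripts/docker-cleanup.sh >> /var/log/docker-cleanup.log 2>&1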
set -euo pipefail
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
echo -e "${YELLOW}=== Docker Cleanup Script ===${NC}"
# Stop all running containers (optional - uncomment if needed)
# echo -e "${YELLOW}Stopping all running containers...${NC}"
# docker stop $(docker ps -q) 2>/dev/null || true
echo -e "${GREEN}Removing stopped containers...${NC}"
docker container prune -f
echo -e "${GREEN}Removing dangling images...${NC}"
docker image prune -f
echo -e "${GREEN}Removing unused volumes...${NC}"
docker volume prune -f
echo -e "${GREEN}Removing unused networks...${NC}"
docker network prune -f
echo -e "${GREEN}Removing build cache older than 7 days...${NC}"
docker builder prune -f --filter "until=168h"
# Remove images older than 30 days
echo -e "${GREEN}Removing images older than 30 days...${NC}"
docker image prune -a -f --filter "until=720h"
# Show disk usage summary
echo -e "${YELLOW}\n=== Disk Usage Summary ===${NC}"
docker system df
echo -e "${GREEN}\nCleanup complete!${NC}"Automated SSL certificate renewal script using Certbot with pre/post hooks for web server restart and Slack notifications.
#!/bin/bash
# SSL Certificate Auto-Renewal with notifications
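# Usage: ./ssl-renew.sh yourdomain.com
# Suggested daily cron entry (path is illustrative):
#   0 3 * * * SLACK_WEBHOOK_URL=https://hooks.slack.com/... /opt/scripts/ssl-renew.sh example.com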
set -euo pipefail
DOMAIN="${1:-example.com}"
EMAIL="admin@${DOMAIN}"
WEBROOT="/var/www/html"
SLACK_WEBHOOK="${SLACK_WEBHOOK_URL:-}"
LOG_FILE="/var/log/ssl-renewal.log"
log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE"; }
notify() {
if [ -n "$SLACK_WEBHOOK" ]; then
curl -s -X POST "$SLACK_WEBHOOK" \
-H 'Content-Type: application/json' \
-d "{\"text\": \"$1\"}" > /dev/null
fi
}
log "Starting SSL renewal check for $DOMAIN"
# Check certificate expiry
EXPIRY=$(openssl s_client -connect "${DOMAIN}:443" -servername "$DOMAIN" </dev/null 2>/dev/null | \
openssl x509 -noout -enddate 2>/dev/null | cut -d= -f2)
if [ -n "$EXPIRY" ]; then
EXPIRY_EPOCH=$(date -d "$EXPIRY" +%s 2>/dev/null || date -jf "%b %d %T %Y %Z" "$EXPIRY" +%s)
NOW_EPOCH=$(date +%s)
DAYS_LEFT=$(( (EXPIRY_EPOCH - NOW_EPOCH) / 86400 ))
log "Certificate expires in $DAYS_LEFT days ($EXPIRY)"
if [ "$DAYS_LEFT" -gt 30 ]; then
log "No renewal needed. Exiting."
exit 0
fi
fi
log "Attempting certificate renewal..."
if certbot renew --webroot -w "$WEBROOT" --quiet --deploy-hook "systemctl reload nginx"; then
log "SSL certificate renewed successfully"
notify "SSL certificate for $DOMAIN renewed successfully"
else
log "ERROR: SSL renewal failed!"
notify "ALERT: SSL renewal FAILED for $DOMAIN"
exit 1
fi

Automated MongoDB backup with compression, rotation, and optional S3 upload.
#!/bin/bash
# MongoDB Backup Script with rotation and S3 upload
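# Usage (values are illustrative): MONGO_DB=myapp S3_BUCKET=my-bucket ./mongo-backup.sh
# S3 upload assumes the AWS CLI is installed and credentials are configured.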
set -euo pipefail
DB_NAME="${MONGO_DB:-myapp}"
MONGO_URI="${MONGO_URI:-mongodb://localhost:27017}"
BACKUP_DIR="/backups/mongodb"
RETENTION_DAYS=14
S3_BUCKET="${S3_BUCKET:-}"
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_PATH="${BACKUP_DIR}/${DB_NAME}_${DATE}"
mkdir -p "$BACKUP_DIR"
echo "[$(date)] Starting backup of $DB_NAME..."
# Create backup
mongodump --uri="$MONGO_URI" --db="$DB_NAME" --out="$BACKUP_PATH" --gzip
# Compress into single archive
ARCHIVE="${BACKUP_PATH}.tar.gz"
tar -czf "$ARCHIVE" -C "$BACKUP_DIR" "${DB_NAME}_${DATE}"
rm -rf "$BACKUP_PATH"
SIZE=$(du -h "$ARCHIVE" | cut -f1)
echo "[$(date)] Backup created: $ARCHIVE ($SIZE)"
# Upload to S3 if configured
if [ -n "$S3_BUCKET" ]; then
echo "Uploading to S3..."
aws s3 cp "$ARCHIVE" "s3://${S3_BUCKET}/mongodb/$(basename $ARCHIVE)" \
--storage-class STANDARD_IA
echo "Uploaded to s3://${S3_BUCKET}/mongodb/"
fi
# Rotate old backups
echo "Removing backups older than $RETENTION_DAYS days..."
find "$BACKUP_DIR" -name "*.tar.gz" -mtime +$RETENTION_DAYS -delete
REMAINING=$(ls -1 "$BACKUP_DIR"/*.tar.gz 2>/dev/null | wc -l)
echo "[$(date)] Backup complete. $REMAINING backups retained."
echo "---"
echo "Database: $DB_NAME"
echo "Archive: $ARCHIVE"
echo "Size: $SIZE"Zero-downtime deployment script for Node.js apps using PM2 with health checks, rollback support, and deployment logging.
#!/bin/bash
# PM2 Zero-Downtime Deployment Script
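# Usage: ./deploy.sh <app-name> <app-dir> <branch>
#   e.g. ./deploy.sh myapp /var/www/myapp main
# Assumes the app is already running under PM2 and the directory is a git clone.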
set -euo pipefail
APP_NAME="${1:-myapp}"
APP_DIR="${2:-/var/www/$APP_NAME}"
BRANCH="${3:-main}"
LOG_FILE="/var/log/deploy-${APP_NAME}.log"
log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE"; }
log "=== Starting deployment of $APP_NAME ==="
cd "$APP_DIR"
# Save current commit for rollback
PREV_COMMIT=$(git rev-parse HEAD)
log "Current commit: $PREV_COMMIT"
# Pull latest changes
log "Pulling from $BRANCH..."
git fetch origin "$BRANCH"
git reset --hard "origin/$BRANCH"
NEW_COMMIT=$(git rev-parse HEAD)
log "New commit: $NEW_COMMIT"
if [ "$PREV_COMMIT" = "$NEW_COMMIT" ]; then
log "No changes detected. Skipping deployment."
exit 0
fi
# Install dependencies
log "Installing dependencies..."
npm ci --production
# Run migrations if script exists
if [ -f "scripts/migrate.js" ]; then
log "Running migrations..."
node scripts/migrate.js
fi
# Reload with zero downtime
log "Reloading PM2 processes..."
pm2 reload "$APP_NAME" --update-env
# Health check
sleep 5
if pm2 show "$APP_NAME" | grep -q "online"; then
log "Deployment successful! App is online."
else
log "ERROR: App not healthy after deploy. Rolling back..."
git reset --hard "$PREV_COMMIT"
npm ci --production
pm2 reload "$APP_NAME" --update-env
log "Rolled back to $PREV_COMMIT"
exit 1
fi
log "=== Deployment complete ==="Flexible log rotation script with compression, size-based rotation, and configurable retention policies.
#!/bin/bash
# Log Rotation Script with compression and retention
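# Usage: ./rotate-logs.sh [log-dir] [max-size-mb] [keep-days] [compress]
#   e.g. ./rotate-logs.sh /var/log/myapp 50 30 true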
set -euo pipefail
LOG_DIR="${1:-/var/log/myapp}"
MAX_SIZE_MB="${2:-50}"
KEEP_DAYS="${3:-30}"
COMPRESS="${4:-true}"
log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1"; }
log "=== Log Rotation: $LOG_DIR ==="
log "Max size: ${MAX_SIZE_MB}MB | Retain: ${KEEP_DAYS} days | Compress: $COMPRESS"
# Find log files exceeding max size
MAX_SIZE_BYTES=$((MAX_SIZE_MB * 1024 * 1024))
find "$LOG_DIR" -name "*.log" -type f | while read -r logfile; do
FILE_SIZE=$(stat -f%z "$logfile" 2>/dev/null || stat -c%s "$logfile" 2>/dev/null)
if [ "$FILE_SIZE" -gt "$MAX_SIZE_BYTES" ]; then
ROTATED="${logfile}.$(date +%Y%m%d_%H%M%S)"
cp "$logfile" "$ROTATED"
truncate -s 0 "$logfile"
log "Rotated: $(basename $logfile) ($(numfmt --to=iec $FILE_SIZE))"
if [ "$COMPRESS" = "true" ]; then
gzip "$ROTATED"
log "Compressed: $(basename $ROTATED).gz"
fi
fi
done
# Remove old rotated logs
OLD_COUNT=$(find "$LOG_DIR" \( -name "*.log.*.gz" -o -name "*.log.[0-9]*" \) -mtime +$KEEP_DAYS | wc -l)
if [ "$OLD_COUNT" -gt 0 ]; then
find "$LOG_DIR" \( -name "*.log.*.gz" -o -name "*.log.[0-9]*" \) -mtime +$KEEP_DAYS -delete
log "Removed $OLD_COUNT old log files"
fi
# Summary
TOTAL_SIZE=$(du -sh "$LOG_DIR" | cut -f1)
FILE_COUNT=$(find "$LOG_DIR" -type f | wc -l)
log "Directory: $TOTAL_SIZE total, $FILE_COUNT files"
log "=== Rotation complete ==="Comprehensive user analytics SQL query with cohort analysis, retention rates, and activity metrics using window functions.
-- User Analytics: Cohort retention, activity metrics, and growth
-- Works with PostgreSQL 12+
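-- Assumed schema (adjust to your own table and column names):
--   users(id, created_at)
--   user_activity(user_id, activity_date)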
-- Monthly cohort retention analysis
WITH user_cohorts AS (
SELECT
u.id AS user_id,
DATE_TRUNC('month', u.created_at) AS cohort_month,
DATE_TRUNC('month', a.activity_date) AS activity_month
FROM users u
LEFT JOIN user_activity a ON u.id = a.user_id
),
cohort_size AS (
SELECT cohort_month, COUNT(DISTINCT user_id) AS total_users
FROM user_cohorts
GROUP BY cohort_month
),
retention AS (
SELECT
uc.cohort_month,
-- months since signup; EXTRACT(MONTH ...) alone wraps past 12 months
(EXTRACT(YEAR FROM AGE(uc.activity_month, uc.cohort_month)) * 12
+ EXTRACT(MONTH FROM AGE(uc.activity_month, uc.cohort_month))) AS month_number,
COUNT(DISTINCT uc.user_id) AS active_users
FROM user_cohorts uc
WHERE uc.activity_month IS NOT NULL
GROUP BY uc.cohort_month, month_number
)
SELECT
TO_CHAR(r.cohort_month, 'YYYY-MM') AS cohort,
cs.total_users,
r.month_number,
r.active_users,
ROUND(100.0 * r.active_users / cs.total_users, 1) AS retention_pct
FROM retention r
JOIN cohort_size cs ON r.cohort_month = cs.cohort_month
WHERE r.month_number BETWEEN 0 AND 12
ORDER BY r.cohort_month, r.month_number;
-- Daily active users with 7-day moving average
SELECT
activity_date,
COUNT(DISTINCT user_id) AS dau,
ROUND(AVG(COUNT(DISTINCT user_id)) OVER (
ORDER BY activity_date ROWS BETWEEN 6 PRECEDING AND CURRENT ROW
), 0) AS dau_7d_avg
FROM user_activity
WHERE activity_date >= CURRENT_DATE - INTERVAL '90 days'
GROUP BY activity_date
ORDER BY activity_date;

Cursor-based and keyset pagination patterns that outperform OFFSET for large datasets, with total count optimization.
-- Efficient Pagination Patterns for PostgreSQL
-- Avoid OFFSET for large tables; use keyset pagination instead
-- Pattern 1: Keyset Pagination (recommended for large datasets)
-- First page
SELECT id, title, created_at, status
FROM articles
WHERE status = 'published'
ORDER BY created_at DESC, id DESC
LIMIT 20;
-- Subsequent pages (use last row's values as cursor)
SELECT id, title, created_at, status
FROM articles
WHERE status = 'published'
AND (created_at, id) < ('2025-01-15 10:30:00', 12345)
ORDER BY created_at DESC, id DESC
LIMIT 20;
-- Pattern 2: Optimized COUNT with estimate for total
-- Exact count (expensive for large tables)
SELECT COUNT(*) FROM articles WHERE status = 'published';
-- Fast estimate using statistics (PostgreSQL)
SELECT reltuples::bigint AS estimated_count
FROM pg_class
WHERE relname = 'articles';
-- Pattern 3: Window function pagination (when you need total)
WITH paginated AS (
SELECT
id, title, created_at,
COUNT(*) OVER() AS total_count
FROM articles
WHERE status = 'published'
ORDER BY created_at DESC
LIMIT 20 OFFSET 0
)
SELECT * FROM paginated;
-- Pattern 4: Indexed pagination with covering index
-- Create covering index for common query
CREATE INDEX idx_articles_published_covering
ON articles (created_at DESC, id DESC)
INCLUDE (title, status)
WHERE status = 'published';

PostgreSQL full-text search configuration with weighted columns, auto-update triggers, trigram similarity, and search ranking.
-- PostgreSQL Full-Text Search with ranking and highlights
-- Enable required extensions
CREATE EXTENSION IF NOT EXISTS pg_trgm;
CREATE EXTENSION IF NOT EXISTS unaccent;
-- Add search vector column
ALTER TABLE articles ADD COLUMN IF NOT EXISTS
search_vector tsvector;
-- Populate search vector with weighted fields
UPDATE articles SET search_vector =
setweight(to_tsvector('english', COALESCE(title, '')), 'A') ||
setweight(to_tsvector('english', COALESCE(description, '')), 'B') ||
setweight(to_tsvector('english', COALESCE(body, '')), 'C') ||
setweight(to_tsvector('english', COALESCE(array_to_string(tags, ' '), '')), 'B');
-- Create GIN index for fast search
CREATE INDEX idx_articles_search ON articles USING GIN (search_vector);
-- Trigram index for fuzzy matching
CREATE INDEX idx_articles_title_trgm ON articles USING GIN (title gin_trgm_ops);
-- Auto-update trigger
CREATE OR REPLACE FUNCTION articles_search_trigger() RETURNS trigger AS $$
BEGIN
NEW.search_vector :=
setweight(to_tsvector('english', COALESCE(NEW.title, '')), 'A') ||
setweight(to_tsvector('english', COALESCE(NEW.description, '')), 'B') ||
setweight(to_tsvector('english', COALESCE(NEW.body, '')), 'C') ||
-- include tags so updates keep parity with the backfill above
setweight(to_tsvector('english', COALESCE(array_to_string(NEW.tags, ' '), '')), 'B');
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trg_articles_search
BEFORE INSERT OR UPDATE ON articles
FOR EACH ROW EXECUTE FUNCTION articles_search_trigger();
-- Search query with ranking and highlights
SELECT
id, title,
ts_rank(search_vector, query) AS rank,
ts_headline('english', body, query,
'StartSel=<mark>, StopSel=</mark>, MaxWords=35') AS snippet
FROM articles, plainto_tsquery('english', 'node.js authentication') AS query
WHERE search_vector @@ query
ORDER BY rank DESC
LIMIT 20;

SQL queries to identify missing indexes, unused indexes, duplicate indexes, and index bloat in PostgreSQL databases.
-- PostgreSQL Index Optimization Toolkit
-- 1. Find missing indexes (tables with sequential scans)
SELECT
schemaname, relname AS table_name,
seq_scan, seq_tup_read,
idx_scan, idx_tup_fetch,
n_tup_ins + n_tup_upd + n_tup_del AS writes,
pg_size_pretty(pg_relation_size(relid)) AS table_size
FROM pg_stat_user_tables
WHERE seq_scan > 1000
AND idx_scan < seq_scan / 2
AND pg_relation_size(relid) > 10 * 1024 * 1024
ORDER BY seq_tup_read DESC
LIMIT 20;
-- 2. Find unused indexes (candidates for removal)
SELECT
s.schemaname, s.relname AS table_name,
s.indexrelname AS index_name,
s.idx_scan AS times_used,
pg_size_pretty(pg_relation_size(i.indexrelid)) AS index_size,
i.indisunique AS is_unique
FROM pg_stat_user_indexes s
JOIN pg_index i ON s.indexrelid = i.indexrelid
WHERE s.idx_scan < 50
AND NOT i.indisunique
AND NOT i.indisprimary
AND pg_relation_size(i.indexrelid) > 1024 * 1024
ORDER BY pg_relation_size(i.indexrelid) DESC;
-- 3. Find duplicate indexes
SELECT
a.indrelid::regclass AS table_name,
a.indexrelid::regclass AS index_a,
b.indexrelid::regclass AS index_b,
pg_size_pretty(pg_relation_size(a.indexrelid)) AS size_a
FROM pg_index a
JOIN pg_index b ON a.indrelid = b.indrelid
AND a.indexrelid < b.indexrelid
AND a.indkey::text = b.indkey::text
WHERE a.indrelid::regclass::text NOT LIKE 'pg_%';
-- 4. Index bloat estimation
SELECT
current_database(), nspname AS schema, tblname AS table,
idxname AS index, bs*(relpages)::bigint AS real_size,
bs*(relpages-est_pages)::bigint AS bloat_size,
ROUND(100 * (relpages-est_pages)::float / relpages, 1) AS bloat_pct
FROM (
SELECT *, (CASE WHEN relpages > est_pages THEN relpages ELSE est_pages END) AS safe_pages
FROM (SELECT coalesce(1 + ceil(reltuples / floor((bs-pageopqdata-pagehdr) /
(4+nulldatahdrwidth)::float)), 0) AS est_pages, bs, nspname, tblname,
idxname, relpages, reltuples
FROM (SELECT 8192 AS bs, 24 AS pageopqdata, 8 AS pagehdr,
CASE WHEN max(coalesce(s.null_frac, 0)) > 0 THEN 2 ELSE 0 END +
max(coalesce(s.avg_width, 0)) AS nulldatahdrwidth,
n.nspname, c.relname AS tblname, i.relname AS idxname,
i.relpages, i.reltuples
FROM pg_index x
JOIN pg_class c ON c.oid = x.indrelid
JOIN pg_class i ON i.oid = x.indexrelid
JOIN pg_namespace n ON n.oid = c.relnamespace
JOIN pg_stats s ON s.tablename = c.relname AND s.attname = ANY(
ARRAY(SELECT a.attname FROM pg_attribute a WHERE a.attrelid = x.indrelid
AND a.attnum = ANY(x.indkey)))
WHERE n.nspname = 'public'
GROUP BY n.nspname, c.relname, i.relname, i.relpages, i.reltuples
) AS est ) AS est2
) AS est3
WHERE relpages > 10 AND (relpages - est_pages) > 0
ORDER BY bloat_size DESC LIMIT 20;

Type-safe API client using generics with structured error handling, query-parameter building, and a token-refresh helper.
interface ApiResponse<T> {
data: T;
status: number;
message?: string;
}
interface RequestConfig {
headers?: Record<string, string>;
params?: Record<string, string>;
signal?: AbortSignal;
}
class ApiClient {
private baseUrl: string;
private defaultHeaders: Record<string, string>;
constructor(baseUrl: string, token?: string) {
this.baseUrl = baseUrl.replace(/\/$/, '');
this.defaultHeaders = {
'Content-Type': 'application/json',
...(token ? { Authorization: `Bearer ${token}` } : {}),
};
}
private buildUrl(endpoint: string, params?: Record<string, string>): string {
const url = new URL(`${this.baseUrl}/${endpoint.replace(/^\//, '')}`);
if (params) {
Object.entries(params).forEach(([k, v]) => url.searchParams.set(k, v));
}
return url.toString();
}
private async request<T>(method: string, endpoint: string, body?: unknown, config?: RequestConfig): Promise<ApiResponse<T>> {
const url = this.buildUrl(endpoint, config?.params);
const res = await fetch(url, {
method,
headers: { ...this.defaultHeaders, ...config?.headers },
body: body ? JSON.stringify(body) : undefined,
signal: config?.signal,
});
if (!res.ok) {
const error = await res.json().catch(() => ({ message: res.statusText }));
throw new ApiError(res.status, error.message || 'Request failed');
}
const data = await res.json() as T;
return { data, status: res.status };
}
get<T>(endpoint: string, config?: RequestConfig) { return this.request<T>('GET', endpoint, undefined, config); }
post<T>(endpoint: string, body: unknown, config?: RequestConfig) { return this.request<T>('POST', endpoint, body, config); }
put<T>(endpoint: string, body: unknown, config?: RequestConfig) { return this.request<T>('PUT', endpoint, body, config); }
delete<T>(endpoint: string, config?: RequestConfig) { return this.request<T>('DELETE', endpoint, undefined, config); }
}
class ApiError extends Error {
constructor(public status: number, message: string) {
super(message);
this.name = 'ApiError';
}
}
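// A small token-refresh helper to go with the client (a sketch; the refresh
// callback is an assumption, e.g. a POST to your auth refresh endpoint):
async function withTokenRefresh<T>(
  call: () => Promise<ApiResponse<T>>,
  refresh: () => Promise<void>
): Promise<ApiResponse<T>> {
  try {
    return await call();
  } catch (err) {
    if (err instanceof ApiError && err.status === 401) {
      await refresh(); // obtain a new token, rebuild the client if needed
      return call();   // retry the original request once
    }
    throw err;
  }
}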
// Usage
interface User { id: string; name: string; email: string; }
const api = new ApiClient('https://api.example.com', 'your-token');
const { data: users } = await api.get<User[]>('/users', { params: { limit: '10' } });

Production-ready web scraper with retry logic, exponential backoff, user-agent rotation, and structured data extraction using BeautifulSoup.
import requests
from bs4 import BeautifulSoup
import time
import random
from dataclasses import dataclass
from typing import List, Optional
USER_AGENTS = [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36",
]
@dataclass
class ScrapedItem:
    title: str
    url: str
    price: Optional[str] = None
    description: Optional[str] = None

def scrape_page(url: str, retries: int = 3) -> Optional[BeautifulSoup]:
    for attempt in range(retries):
        try:
            headers = {"User-Agent": random.choice(USER_AGENTS)}
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()
            return BeautifulSoup(response.text, "html.parser")
        except requests.RequestException as e:
            print(f"Attempt {attempt + 1} failed: {e}")
            if attempt < retries - 1:
                time.sleep(2 ** attempt + random.random())
    return None

def extract_items(soup: BeautifulSoup, selector: str) -> List[ScrapedItem]:
    items = []
    for el in soup.select(selector):
        title = el.select_one("h2, h3, .title")
        link = el.select_one("a[href]")
        price = el.select_one(".price, .cost")
        if title:
            items.append(ScrapedItem(
                title=title.get_text(strip=True),
                url=link["href"] if link else "",
                price=price.get_text(strip=True) if price else None,
            ))
    return items

if __name__ == "__main__":
    soup = scrape_page("https://example.com/products")
    if soup:
        items = extract_items(soup, ".product-card")
        for item in items:
            print(f"{item.title} - {item.price}")

Flexible CSV processor with support for filtering, transforming, aggregating, and exporting data using pandas.
import pandas as pd
from pathlib import Path
from typing import Dict, List, Optional, Callable
class CSVProcessor:
    def __init__(self, filepath: str, encoding: str = "utf-8"):
        self.filepath = Path(filepath)
        self.df = pd.read_csv(filepath, encoding=encoding)
        self.original_shape = self.df.shape
        print(f"Loaded {self.original_shape[0]} rows, {self.original_shape[1]} columns")

    def filter_rows(self, column: str, condition: Callable) -> "CSVProcessor":
        self.df = self.df[self.df[column].apply(condition)]
        return self

    def rename_columns(self, mapping: Dict[str, str]) -> "CSVProcessor":
        self.df = self.df.rename(columns=mapping)
        return self

    def drop_duplicates(self, subset: Optional[List[str]] = None) -> "CSVProcessor":
        before = len(self.df)
        self.df = self.df.drop_duplicates(subset=subset)
        print(f"Removed {before - len(self.df)} duplicates")
        return self

    def fill_missing(self, column: str, value) -> "CSVProcessor":
        self.df[column] = self.df[column].fillna(value)
        return self

    def aggregate(self, group_by: str, agg_dict: Dict[str, str]) -> pd.DataFrame:
        return self.df.groupby(group_by).agg(agg_dict).reset_index()

    def export(self, output: str, index: bool = False) -> None:
        self.df.to_csv(output, index=index)
        print(f"Exported {len(self.df)} rows to {output}")

    def summary(self) -> None:
        print(f"\nShape: {self.df.shape}")
        print(f"Columns: {list(self.df.columns)}")
        print(f"Missing values:\n{self.df.isnull().sum()}")

if __name__ == "__main__":
    processor = CSVProcessor("data/sales.csv")
    processor.drop_duplicates(["order_id"]) \
        .filter_rows("amount", lambda x: x > 0) \
        .fill_missing("region", "Unknown") \
.export("data/cleaned_sales.csv")Reusable API client class with session management, automatic retries, token refresh, and response caching.
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import time
from typing import Optional, Dict, Any
class APIClient:
    def __init__(self, base_url: str, api_key: Optional[str] = None, timeout: int = 30):
        self.base_url = base_url.rstrip("/")
        self.timeout = timeout
        self.session = requests.Session()
        # Configure retry strategy
        retry = Retry(total=3, backoff_factor=1, status_forcelist=[429, 500, 502, 503])
        adapter = HTTPAdapter(max_retries=retry)
        self.session.mount("http://", adapter)
        self.session.mount("https://", adapter)
        if api_key:
            self.session.headers.update({"Authorization": f"Bearer {api_key}"})
        self.session.headers.update({
            "Content-Type": "application/json",
            "Accept": "application/json",
        })

    def _request(self, method: str, endpoint: str, **kwargs) -> Dict[str, Any]:
        url = f"{self.base_url}/{endpoint.lstrip('/')}"
        response = self.session.request(method, url, timeout=self.timeout, **kwargs)
        if response.status_code == 429:
            retry_after = int(response.headers.get("Retry-After", 5))
            time.sleep(retry_after)
            response = self.session.request(method, url, timeout=self.timeout, **kwargs)
        response.raise_for_status()
        return response.json() if response.content else {}

    def get(self, endpoint: str, params: Optional[Dict] = None) -> Dict:
        return self._request("GET", endpoint, params=params)

    def post(self, endpoint: str, data: Optional[Dict] = None) -> Dict:
        return self._request("POST", endpoint, json=data)

    def put(self, endpoint: str, data: Optional[Dict] = None) -> Dict:
        return self._request("PUT", endpoint, json=data)

    def delete(self, endpoint: str) -> Dict:
        return self._request("DELETE", endpoint)

if __name__ == "__main__":
    client = APIClient("https://api.example.com", api_key="your-key")
    users = client.get("/users", params={"page": 1, "limit": 10})
    print(f"Found {len(users.get('data', []))} users")

SMTP email sender supporting HTML templates, attachments, and batch sending with rate limiting.
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
from pathlib import Path
from string import Template
from typing import List, Optional
import time
class EmailSender:
    def __init__(self, host: str, port: int, username: str, password: str):
        self.host = host
        self.port = port
        self.username = username
        self.password = password

    def _connect(self) -> smtplib.SMTP:
        server = smtplib.SMTP(self.host, self.port)
        server.starttls()
        server.login(self.username, self.password)
        return server

    def send(self, to: str, subject: str, html_body: str,
             attachments: Optional[List[str]] = None) -> bool:
        msg = MIMEMultipart()
        msg["From"] = self.username
        msg["To"] = to
        msg["Subject"] = subject
        msg.attach(MIMEText(html_body, "html"))
        for filepath in (attachments or []):
            path = Path(filepath)
            if path.exists():
                part = MIMEBase("application", "octet-stream")
                part.set_payload(path.read_bytes())
                encoders.encode_base64(part)
                part.add_header("Content-Disposition", f"attachment; filename={path.name}")
                msg.attach(part)
        try:
            server = self._connect()
            server.sendmail(self.username, to, msg.as_string())
            server.quit()
            return True
        except Exception as e:
            print(f"Failed to send to {to}: {e}")
            return False

    def send_template(self, to: str, subject: str, template_path: str,
                      variables: dict) -> bool:
        template = Template(Path(template_path).read_text())
        html = template.safe_substitute(variables)
        return self.send(to, subject, html)

    def send_batch(self, recipients: List[str], subject: str,
                   html_body: str, delay: float = 1.0) -> dict:
        results = {"sent": 0, "failed": 0}
        for email in recipients:
            if self.send(email, subject, html_body):
                results["sent"] += 1
            else:
                results["failed"] += 1
            time.sleep(delay)
        return results

Selenium WebDriver wrapper with headless mode, wait utilities, screenshot capture, and cookie management.
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import json
from pathlib import Path
class BrowserAutomation:
    def __init__(self, headless: bool = True, timeout: int = 10):
        options = Options()
        if headless:
            options.add_argument("--headless=new")
        options.add_argument("--no-sandbox")
        options.add_argument("--disable-dev-shm-usage")
        options.add_argument("--window-size=1920,1080")
        options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64)")
        self.driver = webdriver.Chrome(options=options)
        self.driver.implicitly_wait(timeout)
        self.wait = WebDriverWait(self.driver, timeout)

    def navigate(self, url: str) -> "BrowserAutomation":
        self.driver.get(url)
        return self

    def click(self, selector: str, by: str = "css") -> "BrowserAutomation":
        locator = By.CSS_SELECTOR if by == "css" else By.XPATH
        self.wait.until(EC.element_to_be_clickable((locator, selector))).click()
        return self

    def fill(self, selector: str, text: str) -> "BrowserAutomation":
        el = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, selector)))
        el.clear()
        el.send_keys(text)
        return self

    def get_text(self, selector: str) -> str:
        el = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, selector)))
        return el.text

    def screenshot(self, filename: str = "screenshot.png") -> None:
        self.driver.save_screenshot(filename)

    def save_cookies(self, path: str = "cookies.json") -> None:
        Path(path).write_text(json.dumps(self.driver.get_cookies()))

    def load_cookies(self, path: str = "cookies.json") -> None:
        cookies = json.loads(Path(path).read_text())
        for cookie in cookies:
            self.driver.add_cookie(cookie)

    def close(self) -> None:
        self.driver.quit()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

if __name__ == "__main__":
    with BrowserAutomation(headless=True) as browser:
        browser.navigate("https://example.com")
        title = browser.driver.title
        print(f"Page title: {title}")

Robust MongoDB connection handler with automatic retry logic, connection pooling, and graceful shutdown support.
const mongoose = require('mongoose');
const MAX_RETRIES = 5;
const RETRY_DELAY = 3000;
async function connectDB(retryCount = 0) {
try {
await mongoose.connect(process.env.MONGO_URI, {
maxPoolSize: 10,
serverSelectionTimeoutMS: 5000,
socketTimeoutMS: 45000,
});
console.log('MongoDB connected successfully');
mongoose.connection.on('error', (err) => {
console.error('MongoDB connection error:', err);
});
mongoose.connection.on('disconnected', () => {
console.warn('MongoDB disconnected. Attempting reconnect...');
});
} catch (err) {
console.error(`MongoDB connection attempt ${retryCount + 1} failed:`, err.message);
if (retryCount < MAX_RETRIES) {
console.log(`Retrying in ${RETRY_DELAY / 1000}s...`);
await new Promise(r => setTimeout(r, RETRY_DELAY));
return connectDB(retryCount + 1);
}
throw new Error('Failed to connect to MongoDB after max retries');
}
}
process.on('SIGINT', async () => {
await mongoose.connection.close();
console.log('MongoDB connection closed gracefully');
process.exit(0);
});
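// Usage (a sketch): connect before accepting traffic
// const connectDB = require('./db');
// connectDB().then(() => app.listen(process.env.PORT || 3000));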
module.exports = connectDB;

Centralized error handling middleware for Express apps with proper error classification, logging, and consistent JSON responses.
class AppError extends Error {
constructor(message, statusCode, code = 'INTERNAL_ERROR') {
super(message);
this.statusCode = statusCode;
this.code = code;
this.isOperational = true;
Error.captureStackTrace(this, this.constructor);
}
}
const errorHandler = (err, req, res, next) => {
err.statusCode = err.statusCode || 500;
// Log error (only stack for 500s)
if (err.statusCode >= 500) {
console.error(`[${new Date().toISOString()}] ${err.stack}`);
}
// Mongoose validation error
if (err.name === 'ValidationError') {
const messages = Object.values(err.errors).map(e => e.message);
return res.status(400).json({
success: false,
code: 'VALIDATION_ERROR',
errors: messages
});
}
// Mongoose duplicate key
if (err.code === 11000) {
const field = Object.keys(err.keyValue)[0];
return res.status(409).json({
success: false,
code: 'DUPLICATE_KEY',
message: `${field} already exists`
});
}
res.status(err.statusCode).json({
success: false,
code: err.code || 'INTERNAL_ERROR',
message: err.isOperational ? err.message : 'Something went wrong'
});
};
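// Usage: mount after all routes; signal expected failures with AppError
// app.use(errorHandler);
// Inside a route: return next(new AppError('User not found', 404, 'NOT_FOUND'));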
module.exports = { AppError, errorHandler };

In-memory rate limiter using a sliding window algorithm. Supports configurable limits per IP with automatic cleanup.
class SlidingWindowRateLimiter {
constructor({ windowMs = 60000, maxRequests = 100 } = {}) {
this.windowMs = windowMs;
this.maxRequests = maxRequests;
this.clients = new Map();
// Clean up expired entries every minute; unref() keeps this timer
// from holding the process open on shutdown
setInterval(() => this.cleanup(), 60000).unref();
}
cleanup() {
const now = Date.now();
for (const [key, timestamps] of this.clients) {
const valid = timestamps.filter(t => now - t < this.windowMs);
if (valid.length === 0) this.clients.delete(key);
else this.clients.set(key, valid);
}
}
isAllowed(key) {
const now = Date.now();
const timestamps = this.clients.get(key) || [];
const valid = timestamps.filter(t => now - t < this.windowMs);
valid.push(now);
this.clients.set(key, valid);
return {
allowed: valid.length <= this.maxRequests,
remaining: Math.max(0, this.maxRequests - valid.length),
resetMs: valid.length > 0 ? this.windowMs - (now - valid[0]) : 0
};
}
middleware() {
return (req, res, next) => {
const key = req.ip || req.connection.remoteAddress;
const { allowed, remaining, resetMs } = this.isAllowed(key);
res.setHeader('X-RateLimit-Limit', this.maxRequests);
res.setHeader('X-RateLimit-Remaining', remaining);
if (!allowed) {
return res.status(429).json({
error: 'Too many requests',
retryAfter: Math.ceil(resetMs / 1000)
});
}
next();
};
}
}
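// Usage (a sketch; 60 requests per minute per IP):
// const limiter = new SlidingWindowRateLimiter({ windowMs: 60000, maxRequests: 60 });
// app.use(limiter.middleware());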
module.exports = SlidingWindowRateLimiter;

Multer-based file upload middleware with MIME type validation, file size limits, and secure filename generation.
const multer = require('multer');
const path = require('path');
const crypto = require('crypto');
const ALLOWED_TYPES = {
'image/jpeg': '.jpg',
'image/png': '.png',
'image/webp': '.webp',
'application/pdf': '.pdf'
};
const MAX_SIZE = 5 * 1024 * 1024; // 5MB
const storage = multer.diskStorage({
destination: (req, file, cb) => {
cb(null, path.join(__dirname, '../uploads'));
},
filename: (req, file, cb) => {
const uniqueId = crypto.randomBytes(16).toString('hex');
const ext = ALLOWED_TYPES[file.mimetype] || path.extname(file.originalname);
cb(null, `${Date.now()}-${uniqueId}${ext}`);
}
});
const fileFilter = (req, file, cb) => {
if (ALLOWED_TYPES[file.mimetype]) {
cb(null, true);
} else {
cb(new Error(`File type ${file.mimetype} not allowed. Accepted: ${Object.keys(ALLOWED_TYPES).join(', ')}`), false);
}
};
const upload = multer({
storage,
fileFilter,
limits: { fileSize: MAX_SIZE, files: 5 }
});
const handleUploadError = (err, req, res, next) => {
if (err instanceof multer.MulterError) {
if (err.code === 'LIMIT_FILE_SIZE') {
return res.status(400).json({ error: 'File too large (max 5MB)' });
}
return res.status(400).json({ error: err.message });
}
if (err) return res.status(400).json({ error: err.message });
next();
};
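// Usage (a sketch; the 'files' field name is illustrative):
// router.post('/upload', upload.array('files', 5), handleUploadError, (req, res) => {
//   res.json({ files: req.files.map(f => f.filename) });
// });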
module.exports = { upload, handleUploadError };

Express middleware that verifies JWT tokens from the Authorization header and attaches the decoded user to the request object.
const jwt = require('jsonwebtoken');
const authenticate = (req, res, next) => {
const authHeader = req.headers.authorization;
if (!authHeader || !authHeader.startsWith('Bearer ')) {
return res.status(401).json({ error: 'No token provided' });
}
const token = authHeader.split(' ')[1];
try {
const decoded = jwt.verify(token, process.env.JWT_SECRET);
req.user = decoded;
next();
} catch (err) {
if (err.name === 'TokenExpiredError') {
return res.status(401).json({ error: 'Token expired' });
}
return res.status(403).json({ error: 'Invalid token' });
}
};
const authorize = (...roles) => {
return (req, res, next) => {
if (!req.user || !roles.includes(req.user.role)) {
return res.status(403).json({ error: 'Insufficient permissions' });
}
next();
};
};
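// Usage (a sketch):
// router.get('/me', authenticate, (req, res) => res.json(req.user));
// router.delete('/users/:id', authenticate, authorize('admin'), adminHandler);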
module.exports = { authenticate, authorize };