Added an example docker-compose for load balancing and improved the documentation of the auth mechanism.

This commit is contained in:
Rik Heijmann 2025-03-02 17:59:19 +01:00
parent 7b6175cd07
commit 4faf155f82
4 changed files with 336 additions and 41 deletions

View File

@ -1,3 +1,7 @@
[[bin]]
name = "Axium"
path = "src/main.rs"
[package] [package]
name = "Axium" name = "Axium"
version = "0.1.0" version = "0.1.0"

View File

@ -0,0 +1,252 @@
services:
  # HAProxy load balancer fronting both Axium instances
  haproxy:
    # Use the latest HAProxy image
    image: haproxy:latest
    # Expose HTTP on the host
    ports:
      - "80:80"
    # Generate the HAProxy config at startup, then run haproxy in the foreground.
    # Compose interpolates ${...} before the shell runs, so the backend ports
    # below stay in sync with SERVER_PORT / SERVER2_PORT instead of being
    # hardcoded to 3000/3001.
    command:
      - /bin/sh
      - -c
      - |
        cat > /usr/local/etc/haproxy/haproxy.cfg << EOF
        global
            daemon
            maxconn 256
        defaults
            mode http
            timeout connect 5000ms
            timeout client 50000ms
            timeout server 50000ms
        frontend http-in
            bind *:80
            default_backend servers
        backend servers
            balance roundrobin
            server server1 axium:${SERVER_PORT:-3000} check
            server server2 axium2:${SERVER2_PORT:-3001} check
        EOF
        haproxy -f /usr/local/etc/haproxy/haproxy.cfg
    # Wait until both Axium instances report healthy before balancing traffic
    depends_on:
      axium:
        condition: service_healthy
      axium2:
        condition: service_healthy
# Service for the Axium application
axium:
# Build the Docker image from the current directory using the specified Dockerfile
build:
context: .
dockerfile: Dockerfile
# Map ports from the container to the host machine
ports:
- "${SERVER_PORT:-3000}:${SERVER_PORT:-3000}" # Expose server port
# Environment variables for the service
environment:
# Set environment (e.g., development, production)
- ENVIRONMENT=${ENVIRONMENT:-development} # Default to development if not set
# Server settings
- SERVER_IP=${SERVER_IP:-0.0.0.0} # Default IP to listen on
- SERVER_PORT=${SERVER_PORT:-3000} # Default port to listen on
- SERVER_TRACE_ENABLED=${SERVER_TRACE_ENABLED:-true} # Enable tracing by default
- SERVER_WORKER_THREADS=${SERVER_WORKER_THREADS:-2} # Number of worker threads
# Database connection settings
- DATABASE_URL=postgres://${DATABASE_USER:-dbuser}:${DATABASE_PASSWORD:-1234}@pgpool/${DATABASE_DB:-axium}
- DATABASE_MAX_CONNECTIONS=${DATABASE_MAX_CONNECTIONS:-20} # Max database connections
- DATABASE_MIN_CONNECTIONS=${DATABASE_MIN_CONNECTIONS:-5} # Min database connections
# HTTPS settings
- SERVER_HTTPS_ENABLED=${SERVER_HTTPS_ENABLED:-false} # Disable HTTPS by default
- SERVER_HTTPS_HTTP2_ENABLED=${SERVER_HTTPS_HTTP2_ENABLED:-true} # Enable HTTP/2 for HTTPS
# Certificate paths for HTTPS
- SERVER_HTTPS_CERT_FILE_PATH=/app/certs/cert.pem
- SERVER_HTTPS_KEY_FILE_PATH=/app/certs/key.pem
# Rate limiting settings
- SERVER_RATE_LIMIT=${SERVER_RATE_LIMIT:-5} # Default rate limit
- SERVER_RATE_LIMIT_PERIOD=${SERVER_RATE_LIMIT_PERIOD:-1} # Rate limit period in seconds
# Compression settings
- SERVER_COMPRESSION_ENABLED=${SERVER_COMPRESSION_ENABLED:-true} # Enable compression by default
- SERVER_COMPRESSION_LEVEL=${SERVER_COMPRESSION_LEVEL:-6} # Compression level
# JWT secret key (change this in production!)
- JWT_SECRET_KEY=${JWT_SECRET_KEY:-Change me!} # VERY important to change this!
# Depend on the pgpool service and wait until it's healthy
depends_on:
pgpool:
condition: service_healthy
# Mount volumes for certificates
volumes:
- ./certs:/app/certs # Mount local certs directory to container
# Health check settings
healthcheck:
# Test the health of the service by checking the /health endpoint
test: ["CMD", "curl", "-f", "http://${SERVER_IP:-0.0.0.0}:${SERVER_PORT:-3000}/health"]
interval: 10s # Check every 10 seconds
timeout: 5s # Timeout after 5 seconds
retries: 3 # Retry up to 3 times
start_period: 15s # Wait 15 seconds before starting checks
# Resource limits for the service
deploy:
resources:
limits:
# Limit CPU usage (default: 0.5 cores)
cpus: '${AXIUM_CPU_LIMIT:-0.5}'
# Limit RAM usage (default: 512MB)
memory: ${AXIUM_MEMORY_LIMIT:-512M}
# Second instance of Axium application
axium2:
# Use the same build configuration as the first instance. Haven't found a way to bypass having to build the second container.
build:
context: .
dockerfile: Dockerfile
# Use the same environment variables and other settings as the first instance
environment:
- SERVER_PORT=${SERVER2_PORT:-3001} # Use a different port
ports:
- "${SERVER2_PORT:-3001}:${SERVER2_PORT:-3001}"
depends_on:
pgpool:
condition: service_healthy
# Health check settings
healthcheck:
# Test the health of the service by checking the /health endpoint
test: ["CMD", "curl", "-f", "http://${SERVER2_IP:-0.0.0.0}:${SERVER2_PORT:-3001}/health"]
interval: 10s # Check every 10 seconds
timeout: 5s # Timeout after 5 seconds
retries: 3 # Retry up to 3 times
start_period: 15s # Wait 15 seconds before starting checks
# Resource limits for the service
deploy:
resources:
limits:
# Limit CPU usage (default: 0.5 cores)
cpus: '${AXIUM_CPU_LIMIT:-0.5}'
# Limit RAM usage (default: 512MB)
memory: ${AXIUM_MEMORY_LIMIT:-512M}
# PostgreSQL connection pooler
pgpool:
# Use the Bitnami Pgpool-II image
image: bitnami/pgpool:4.6.0
# Map port 5432 from the container to the host machine
ports:
- "5432:5432"
# Environment variables for the service
environment:
- PGPOOL_BACKEND_NODES=0:db:5432,1:db2:5432
- PGPOOL_SR_CHECK_USER=${DATABASE_USER:-dbuser}
- PGPOOL_SR_CHECK_PASSWORD=${DATABASE_PASSWORD:-1234}
- PGPOOL_ENABLE_LOAD_BALANCING=yes
- PGPOOL_MAX_POOL=20
- PGPOOL_ADMIN_USERNAME=${PGPOOL_ADMIN_USERNAME:-pgpooladmin} # Add admin username
- PGPOOL_ADMIN_PASSWORD=${PGPOOL_ADMIN_PASSWORD:-adminpassword} # Add admin password
- PGPOOL_POSTGRES_USERNAME=${DATABASE_USER:-dbuser} # Add Postgres username
- PGPOOL_POSTGRES_PASSWORD=${DATABASE_PASSWORD:-1234} # Add Postgres password
# Depend on both PostgreSQL databases and wait until they're healthy
depends_on:
db:
condition: service_healthy
db2:
condition: service_healthy
# Health check settings
healthcheck:
# Test the health of the service using pg_isready
test: ["CMD", "pg_isready", "-h", "localhost", "-p", "5432", "-U", "${DATABASE_USER:-dbuser}"]
interval: 10s # Check every 10 seconds
timeout: 5s # Timeout after 5 seconds
retries: 5 # Retry up to 5 times
# Primary PostgreSQL database
db:
# Use the official PostgreSQL 17 Alpine image
image: postgres:17-alpine
# Always restart the container if it fails
restart: always
ports:
- "5433:5432"
# Environment variables for the database
environment:
- POSTGRES_USER=${DATABASE_USER:-dbuser}
- POSTGRES_PASSWORD=${DATABASE_PASSWORD:-1234}
- POSTGRES_DB=${DATABASE_DB:-axium}
- POSTGRESQL_REPLICATION_MODE=master
- POSTGRESQL_REPLICATION_USER=repl_user
- POSTGRESQL_REPLICATION_PASSWORD=repl_user
# Mount volumes for database data and logs
volumes:
- ./docker/db/data:/var/lib/postgresql/data
- ./docker/db/logs:/var/log/postgresql
# Health check settings for the database
healthcheck:
# Test the health of the database using pg_isready
test: ["CMD", "pg_isready", "-U", "${DATABASE_USER:-dbuser}"]
interval: 60s # Check every minute
timeout: 10s # Timeout after 10 seconds
retries: 5 # Retry up to 5 times
start_period: 15s # Wait 15 seconds before starting checks
# Resource limits for the database service
deploy:
resources:
limits:
# Limit CPU usage (default: 0.5 cores)
cpus: '${DB_CPU_LIMIT:-0.5}'
# Limit RAM usage (default: 256MB)
memory: ${DB_MEMORY_LIMIT:-256M}
# Secondary PostgreSQL database for failover
db2:
# Use the official PostgreSQL 17 Alpine image
image: postgres:17-alpine
# Always restart the container if it fails
restart: always
ports:
- "5434:5432" # Different port for the slave database
# Environment variables for the database
environment:
- POSTGRES_USER=${DATABASE_USER:-dbuser}
- POSTGRES_PASSWORD=${DATABASE_PASSWORD:-1234}
- POSTGRES_DB=${DATABASE_DB:-axium}
- POSTGRESQL_REPLICATION_MODE=slave
- POSTGRESQL_MASTER_HOST=db
- POSTGRESQL_MASTER_PORT_NUMBER=5432
- POSTGRESQL_REPLICATION_USER=repl_user
- POSTGRESQL_REPLICATION_PASSWORD=repl_user
# Mount volumes for database data and logs
volumes:
- ./docker/db2/data:/var/lib/postgresql/data
- ./docker/db2/logs:/var/log/postgresql
# Health check settings for the database
healthcheck:
# Test the health of the database using pg_isready
test: ["CMD", "pg_isready", "-U", "${DATABASE_USER:-dbuser}"]
interval: 60s # Check every minute
timeout: 10s # Timeout after 10 seconds
retries: 5 # Retry up to 5 times
start_period: 15s # Wait 15 seconds before starting checks
# Resource limits for the database service
deploy:
resources:
limits:
# Limit CPU usage (default: 0.5 cores)
cpus: '${DB_CPU_LIMIT:-0.5}'
# Limit RAM usage (default: 256MB)
memory: ${DB_MEMORY_LIMIT:-256M}

View File

@ -12,18 +12,12 @@ use utoipa::ToSchema;
use crate::utils::auth::{encode_jwt, verify_hash}; use crate::utils::auth::{encode_jwt, verify_hash};
use crate::database::{apikeys::fetch_active_apikeys_by_user_id_from_db, users::fetch_user_by_email_from_db}; use crate::database::{apikeys::fetch_active_apikeys_by_user_id_from_db, users::fetch_user_by_email_from_db};
use crate::models::auth::SignInData;
#[derive(Deserialize, ToSchema)] /// User sign-in endpoint.
pub struct SignInData {
pub email: String,
pub password: String,
pub totp: Option<String>,
}
/// User sign-in endpoint
/// ///
/// This endpoint allows users to sign in using their email, password, and optionally a TOTP code. /// This endpoint allows users to sign in using their email, password, and optionally a TOTP code.
/// ///
/// # Parameters /// # Parameters
/// - `State(pool)`: The shared database connection pool. /// - `State(pool)`: The shared database connection pool.
/// - `Json(user_data)`: The user sign-in data (email, password, and optional TOTP code). /// - `Json(user_data)`: The user sign-in data (email, password, and optional TOTP code).
@ -48,91 +42,122 @@ pub async fn signin(
State(pool): State<PgPool>, State(pool): State<PgPool>,
Json(user_data): Json<SignInData>, Json(user_data): Json<SignInData>,
) -> Result<Json<serde_json::Value>, (StatusCode, Json<serde_json::Value>)> { ) -> Result<Json<serde_json::Value>, (StatusCode, Json<serde_json::Value>)> {
// Fetch the user from the database based on their email.
let user = match fetch_user_by_email_from_db(&pool, &user_data.email).await { let user = match fetch_user_by_email_from_db(&pool, &user_data.email).await {
Ok(Some(user)) => user, Ok(Some(user)) => user,
Ok(None) | Err(_) => return Err(( Ok(None) | Err(_) => {
StatusCode::UNAUTHORIZED, // If the user is not found or there's an error, return an unauthorized response.
Json(json!({ "error": "Incorrect credentials." })) return Err((
)), StatusCode::UNAUTHORIZED,
Json(json!({ "error": "Incorrect credentials." }))
));
}
}; };
// Fetch active API keys for the user.
let api_key_hashes = match fetch_active_apikeys_by_user_id_from_db(&pool, user.id).await { let api_key_hashes = match fetch_active_apikeys_by_user_id_from_db(&pool, user.id).await {
Ok(hashes) => hashes, Ok(hashes) => hashes,
Err(_) => return Err(( Err(_) => {
StatusCode::INTERNAL_SERVER_ERROR, // If there's an error fetching API keys, return an internal server error.
Json(json!({ "error": "Internal server error." })) return Err((
)), StatusCode::INTERNAL_SERVER_ERROR,
Json(json!({ "error": "Internal server error." }))
));
}
}; };
// Check API key first (async version) // Check if any of the API keys match the provided password.
let api_key_futures = api_key_hashes.iter().map(|api_key| { let api_key_futures = api_key_hashes.iter().map(|api_key| {
let password = user_data.password.clone(); let password = user_data.password.clone();
let hash = api_key.key_hash.clone(); let hash = api_key.key_hash.clone();
async move { async move {
// Verify the password against each API key hash.
verify_hash(&password, &hash) verify_hash(&password, &hash)
.await .await
.unwrap_or(false) .unwrap_or(false)
} }
}); });
// Wait for all API key verification futures to complete.
let any_api_key_valid = futures::future::join_all(api_key_futures) let any_api_key_valid = futures::future::join_all(api_key_futures)
.await .await
.into_iter() .into_iter()
.any(|result| result); .any(|result| result);
// Check password (async version) // Verify the user's password against their stored password hash.
let password_valid = verify_hash(&user_data.password, &user.password_hash) let password_valid = verify_hash(&user_data.password, &user.password_hash)
.await .await
.map_err(|_| ( .map_err(|_| {
StatusCode::INTERNAL_SERVER_ERROR, // If there's an error verifying the password, return an internal server error.
Json(json!({ "error": "Internal server error." })) (
))?; StatusCode::INTERNAL_SERVER_ERROR,
Json(json!({ "error": "Internal server error." }))
)
})?;
// Determine if the credentials are valid based on API keys or password.
let credentials_valid = any_api_key_valid || password_valid; let credentials_valid = any_api_key_valid || password_valid;
if !credentials_valid { if !credentials_valid {
// If credentials are not valid, return an unauthorized response.
return Err(( return Err((
StatusCode::UNAUTHORIZED, StatusCode::UNAUTHORIZED,
Json(json!({ "error": "Incorrect credentials." })) Json(json!({ "error": "Incorrect credentials." }))
)); ));
} }
// Check TOTP if it's set up for the user // Check TOTP if it's set up for the user.
if let Some(totp_secret) = user.totp_secret { if let Some(totp_secret) = user.totp_secret {
match user_data.totp { match user_data.totp {
Some(totp_code) => { Some(totp_code) => {
// Create a TOTP instance with the user's secret.
let totp = TOTP::new( let totp = TOTP::new(
Algorithm::SHA512, Algorithm::SHA512,
8, 8,
1, 1,
30, 30,
totp_secret.into_bytes(), totp_secret.into_bytes(),
).map_err(|_| ( ).map_err(|_| {
StatusCode::INTERNAL_SERVER_ERROR, // If there's an error creating the TOTP instance, return an internal server error.
Json(json!({ "error": "Internal server error." })) (
))?; StatusCode::INTERNAL_SERVER_ERROR,
Json(json!({ "error": "Internal server error." }))
)
})?;
// Check if the provided TOTP code is valid.
if !totp.check_current(&totp_code).unwrap_or(false) { if !totp.check_current(&totp_code).unwrap_or(false) {
// If the TOTP code is invalid, return an unauthorized response.
return Err(( return Err((
StatusCode::UNAUTHORIZED, StatusCode::UNAUTHORIZED,
Json(json!({ "error": "Invalid 2FA code." })) Json(json!({ "error": "Invalid 2FA code." }))
)); ));
} }
}, },
None => return Err(( None => {
StatusCode::BAD_REQUEST, // If TOTP is set up but no code is provided, return a bad request.
Json(json!({ "error": "2FA code required for this account." })) return Err((
)), StatusCode::BAD_REQUEST,
Json(json!({ "error": "2FA code required for this account." }))
));
}
} }
} }
// Generate a JWT token for the user.
let email = user.email.clone(); let email = user.email.clone();
let token = encode_jwt(user.email) let token = encode_jwt(user.email)
.map_err(|_| ( .map_err(|_| {
StatusCode::INTERNAL_SERVER_ERROR, // If there's an error generating the JWT, return an internal server error.
Json(json!({ "error": "Internal server error." })) (
))?; StatusCode::INTERNAL_SERVER_ERROR,
Json(json!({ "error": "Internal server error." }))
)
})?;
// Log the successful sign-in.
info!("User signed in: {}", email); info!("User signed in: {}", email);
// Return the JWT token in a JSON response.
Ok(Json(json!({ "token": token }))) Ok(Json(json!({ "token": token })))
} }

View File

@ -39,3 +39,17 @@ pub struct AuthError {
// Implement Error trait for AuthError if needed // Implement Error trait for AuthError if needed
// impl std::error::Error for AuthError {} // impl std::error::Error for AuthError {}
/// Data structure for user sign-in information.
///
/// This includes the user's email, password, and optionally a TOTP code.
#[derive(Deserialize, ToSchema)]
pub struct SignInData {
/// User's email address.
pub email: String,
/// User's password.
pub password: String,
/// Optional TOTP code for two-factor authentication.
pub totp: Option<String>,
}