Added an example docker-compose for load balancing; added better documentation to the auth mechanism.
parent 7b6175cd07
commit 4faf155f82
@@ -1,3 +1,7 @@
[[bin]]
name = "Axium"
path = "src/main.rs"

[package]
name = "Axium"
version = "0.1.0"
docker-compose.loadbalanced.yml (new file, 252 lines)
@@ -0,0 +1,252 @@
services:
  # HAProxy Load Balancer
  haproxy:
    # Use the latest HAProxy image
    image: haproxy:latest
    # Map port 80 from the container to the host machine
    ports:
      - "80:80"
    command:
      - /bin/sh
      - -c
      - |
        cat > /usr/local/etc/haproxy/haproxy.cfg << EOF
        global
            daemon
            maxconn 256

        defaults
            mode http
            timeout connect 5000ms
            timeout client 50000ms
            timeout server 50000ms

        frontend http-in
            bind *:80
            default_backend servers

        backend servers
            balance roundrobin
            server server1 axium:3000 check
            server server2 axium2:3001 check
        EOF
        haproxy -f /usr/local/etc/haproxy/haproxy.cfg
    # Depend on the Axium services and wait until they're healthy
    depends_on:
      axium:
        condition: service_healthy
      axium2:
        condition: service_healthy

  # Service for the Axium application
  axium:
    # Build the Docker image from the current directory using the specified Dockerfile
    build:
      context: .
      dockerfile: Dockerfile

    # Map ports from the container to the host machine
    ports:
      - "${SERVER_PORT:-3000}:${SERVER_PORT:-3000}" # Expose server port

    # Environment variables for the service
    environment:
      # Set environment (e.g., development, production)
      - ENVIRONMENT=${ENVIRONMENT:-development} # Default to development if not set

      # Server settings
      - SERVER_IP=${SERVER_IP:-0.0.0.0} # Default IP to listen on
      - SERVER_PORT=${SERVER_PORT:-3000} # Default port to listen on
      - SERVER_TRACE_ENABLED=${SERVER_TRACE_ENABLED:-true} # Enable tracing by default
      - SERVER_WORKER_THREADS=${SERVER_WORKER_THREADS:-2} # Number of worker threads

      # Database connection settings
      - DATABASE_URL=postgres://${DATABASE_USER:-dbuser}:${DATABASE_PASSWORD:-1234}@pgpool/${DATABASE_DB:-axium}
      - DATABASE_MAX_CONNECTIONS=${DATABASE_MAX_CONNECTIONS:-20} # Max database connections
      - DATABASE_MIN_CONNECTIONS=${DATABASE_MIN_CONNECTIONS:-5} # Min database connections

      # HTTPS settings
      - SERVER_HTTPS_ENABLED=${SERVER_HTTPS_ENABLED:-false} # Disable HTTPS by default
      - SERVER_HTTPS_HTTP2_ENABLED=${SERVER_HTTPS_HTTP2_ENABLED:-true} # Enable HTTP/2 for HTTPS

      # Certificate paths for HTTPS
      - SERVER_HTTPS_CERT_FILE_PATH=/app/certs/cert.pem
      - SERVER_HTTPS_KEY_FILE_PATH=/app/certs/key.pem

      # Rate limiting settings
      - SERVER_RATE_LIMIT=${SERVER_RATE_LIMIT:-5} # Default rate limit
      - SERVER_RATE_LIMIT_PERIOD=${SERVER_RATE_LIMIT_PERIOD:-1} # Rate limit period in seconds

      # Compression settings
      - SERVER_COMPRESSION_ENABLED=${SERVER_COMPRESSION_ENABLED:-true} # Enable compression by default
      - SERVER_COMPRESSION_LEVEL=${SERVER_COMPRESSION_LEVEL:-6} # Compression level

      # JWT secret key (change this in production!)
      - JWT_SECRET_KEY=${JWT_SECRET_KEY:-Change me!} # VERY important to change this!

    # Depend on the pgpool service and wait until it's healthy
    depends_on:
      pgpool:
        condition: service_healthy

    # Mount volumes for certificates
    volumes:
      - ./certs:/app/certs # Mount local certs directory to container

    # Health check settings
    healthcheck:
      # Test the health of the service by checking the /health endpoint
      test: ["CMD", "curl", "-f", "http://${SERVER_IP:-0.0.0.0}:${SERVER_PORT:-3000}/health"]
      interval: 10s # Check every 10 seconds
      timeout: 5s # Timeout after 5 seconds
      retries: 3 # Retry up to 3 times
      start_period: 15s # Wait 15 seconds before starting checks

    # Resource limits for the service
    deploy:
      resources:
        limits:
          # Limit CPU usage (default: 0.5 cores)
          cpus: '${AXIUM_CPU_LIMIT:-0.5}'
          # Limit RAM usage (default: 512MB)
          memory: ${AXIUM_MEMORY_LIMIT:-512M}

  # Second instance of the Axium application
  axium2:
    # Use the same build configuration as the first instance; there is currently no way to avoid building the second container separately.
    build:
      context: .
      dockerfile: Dockerfile
    # Use the same environment variables and other settings as the first instance
    environment:
      - SERVER_PORT=${SERVER2_PORT:-3001} # Use a different port
    ports:
      - "${SERVER2_PORT:-3001}:${SERVER2_PORT:-3001}"
    depends_on:
      pgpool:
        condition: service_healthy
    # Health check settings
    healthcheck:
      # Test the health of the service by checking the /health endpoint
      test: ["CMD", "curl", "-f", "http://${SERVER2_IP:-0.0.0.0}:${SERVER2_PORT:-3001}/health"]
      interval: 10s # Check every 10 seconds
      timeout: 5s # Timeout after 5 seconds
      retries: 3 # Retry up to 3 times
      start_period: 15s # Wait 15 seconds before starting checks
    # Resource limits for the service
    deploy:
      resources:
        limits:
          # Limit CPU usage (default: 0.5 cores)
          cpus: '${AXIUM_CPU_LIMIT:-0.5}'
          # Limit RAM usage (default: 512MB)
          memory: ${AXIUM_MEMORY_LIMIT:-512M}

  # PostgreSQL connection pooler
  pgpool:
    # Use the Bitnami Pgpool-II image
    image: bitnami/pgpool:4.6.0
    # Map port 5432 from the container to the host machine
    ports:
      - "5432:5432"
    # Environment variables for the service
    environment:
      - PGPOOL_BACKEND_NODES=0:db:5432,1:db2:5432
      - PGPOOL_SR_CHECK_USER=${DATABASE_USER:-dbuser}
      - PGPOOL_SR_CHECK_PASSWORD=${DATABASE_PASSWORD:-1234}
      - PGPOOL_ENABLE_LOAD_BALANCING=yes
      - PGPOOL_MAX_POOL=20
      - PGPOOL_ADMIN_USERNAME=${PGPOOL_ADMIN_USERNAME:-pgpooladmin} # Admin username
      - PGPOOL_ADMIN_PASSWORD=${PGPOOL_ADMIN_PASSWORD:-adminpassword} # Admin password
      - PGPOOL_POSTGRES_USERNAME=${DATABASE_USER:-dbuser} # Postgres username
      - PGPOOL_POSTGRES_PASSWORD=${DATABASE_PASSWORD:-1234} # Postgres password
    # Depend on both PostgreSQL databases and wait until they're healthy
    depends_on:
      db:
        condition: service_healthy
      db2:
        condition: service_healthy
    # Health check settings
    healthcheck:
      # Test the health of the service using pg_isready
      test: ["CMD", "pg_isready", "-h", "localhost", "-p", "5432", "-U", "${DATABASE_USER:-dbuser}"]
      interval: 10s # Check every 10 seconds
      timeout: 5s # Timeout after 5 seconds
      retries: 5 # Retry up to 5 times

  # Primary PostgreSQL database
  db:
    # Use the official PostgreSQL 17 Alpine image
    image: postgres:17-alpine
    # Always restart the container if it fails
    restart: always
    ports:
      - "5433:5432"
    # Environment variables for the database
    environment:
      - POSTGRES_USER=${DATABASE_USER:-dbuser}
      - POSTGRES_PASSWORD=${DATABASE_PASSWORD:-1234}
      - POSTGRES_DB=${DATABASE_DB:-axium}
      - POSTGRESQL_REPLICATION_MODE=master
      - POSTGRESQL_REPLICATION_USER=repl_user
      - POSTGRESQL_REPLICATION_PASSWORD=repl_user
    # Mount volumes for database data and logs
    volumes:
      - ./docker/db/data:/var/lib/postgresql/data
      - ./docker/db/logs:/var/log/postgresql
    # Health check settings for the database
    healthcheck:
      # Test the health of the database using pg_isready
      test: ["CMD", "pg_isready", "-U", "${DATABASE_USER:-dbuser}"]
      interval: 60s # Check every minute
      timeout: 10s # Timeout after 10 seconds
      retries: 5 # Retry up to 5 times
      start_period: 15s # Wait 15 seconds before starting checks
    # Resource limits for the database service
    deploy:
      resources:
        limits:
          # Limit CPU usage (default: 0.5 cores)
          cpus: '${DB_CPU_LIMIT:-0.5}'
          # Limit RAM usage (default: 256MB)
          memory: ${DB_MEMORY_LIMIT:-256M}

  # Secondary PostgreSQL database for failover
  db2:
    # Use the official PostgreSQL 17 Alpine image
    image: postgres:17-alpine
    # Always restart the container if it fails
    restart: always
    ports:
      - "5434:5432" # Different port for the replica database
    # Environment variables for the database
    environment:
      - POSTGRES_USER=${DATABASE_USER:-dbuser}
      - POSTGRES_PASSWORD=${DATABASE_PASSWORD:-1234}
      - POSTGRES_DB=${DATABASE_DB:-axium}
      - POSTGRESQL_REPLICATION_MODE=slave
      - POSTGRESQL_MASTER_HOST=db
      - POSTGRESQL_MASTER_PORT_NUMBER=5432
      - POSTGRESQL_REPLICATION_USER=repl_user
      - POSTGRESQL_REPLICATION_PASSWORD=repl_user
    # Mount volumes for database data and logs
    volumes:
      - ./docker/db2/data:/var/lib/postgresql/data
      - ./docker/db2/logs:/var/log/postgresql
    # Health check settings for the database
    healthcheck:
      # Test the health of the database using pg_isready
      test: ["CMD", "pg_isready", "-U", "${DATABASE_USER:-dbuser}"]
      interval: 60s # Check every minute
      timeout: 10s # Timeout after 10 seconds
      retries: 5 # Retry up to 5 times
      start_period: 15s # Wait 15 seconds before starting checks
    # Resource limits for the database service
    deploy:
      resources:
        limits:
          # Limit CPU usage (default: 0.5 cores)
          cpus: '${DB_CPU_LIMIT:-0.5}'
          # Limit RAM usage (default: 256MB)
          memory: ${DB_MEMORY_LIMIT:-256M}
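Note on the health checks above: HAProxy only starts routing to the Axium containers once `condition: service_healthy` is met, and both the compose healthcheck and HAProxy's `check` option assume the application answers GET /health with a success status. For reference, a minimal sketch of such a route in axum (Axium's actual handler is not shown in this commit; the function name, port, and axum 0.7-style serving are assumptions):

use axum::{http::StatusCode, routing::get, Router};

// Hypothetical health handler: returns 200 OK so that
// `curl -f .../health` in the compose healthcheck succeeds.
async fn health() -> StatusCode {
    StatusCode::OK
}

#[tokio::main]
async fn main() {
    // Port 3000 matches the SERVER_PORT default used in the compose file.
    let app = Router::new().route("/health", get(health));
    let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap();
    axum::serve(listener, app).await.unwrap();
}

Since the healthcheck runs curl inside the Axium container, the image itself also needs curl installed for the check to pass.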
@@ -12,18 +12,12 @@ use utoipa::ToSchema;

use crate::utils::auth::{encode_jwt, verify_hash};
use crate::database::{apikeys::fetch_active_apikeys_by_user_id_from_db, users::fetch_user_by_email_from_db};
use crate::models::auth::SignInData;

#[derive(Deserialize, ToSchema)]
pub struct SignInData {
    pub email: String,
    pub password: String,
    pub totp: Option<String>,
}

/// User sign-in endpoint
/// User sign-in endpoint.
///
/// This endpoint allows users to sign in using their email, password, and optionally a TOTP code.
///
///
/// # Parameters
/// - `State(pool)`: The shared database connection pool.
/// - `Json(user_data)`: The user sign-in data (email, password, and optional TOTP code).
@@ -48,91 +42,122 @@ pub async fn signin(
    State(pool): State<PgPool>,
    Json(user_data): Json<SignInData>,
) -> Result<Json<serde_json::Value>, (StatusCode, Json<serde_json::Value>)> {
    // Fetch the user from the database based on their email.
    let user = match fetch_user_by_email_from_db(&pool, &user_data.email).await {
        Ok(Some(user)) => user,
        Ok(None) | Err(_) => return Err((
            StatusCode::UNAUTHORIZED,
            Json(json!({ "error": "Incorrect credentials." }))
        )),
        Ok(None) | Err(_) => {
            // If the user is not found or there's an error, return an unauthorized response.
            return Err((
                StatusCode::UNAUTHORIZED,
                Json(json!({ "error": "Incorrect credentials." }))
            ));
        }
    };

    // Fetch active API keys for the user.
    let api_key_hashes = match fetch_active_apikeys_by_user_id_from_db(&pool, user.id).await {
        Ok(hashes) => hashes,
        Err(_) => return Err((
            StatusCode::INTERNAL_SERVER_ERROR,
            Json(json!({ "error": "Internal server error." }))
        )),
        Err(_) => {
            // If there's an error fetching API keys, return an internal server error.
            return Err((
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(json!({ "error": "Internal server error." }))
            ));
        }
    };

    // Check API key first (async version)
    // Check if any of the API keys match the provided password.
    let api_key_futures = api_key_hashes.iter().map(|api_key| {
        let password = user_data.password.clone();
        let hash = api_key.key_hash.clone();
        async move {
            // Verify the password against each API key hash.
            verify_hash(&password, &hash)
                .await
                .unwrap_or(false)
        }
    });

    // Wait for all API key verification futures to complete.
    let any_api_key_valid = futures::future::join_all(api_key_futures)
        .await
        .into_iter()
        .any(|result| result);

    // Check password (async version)
    // Verify the user's password against their stored password hash.
    let password_valid = verify_hash(&user_data.password, &user.password_hash)
        .await
        .map_err(|_| (
            StatusCode::INTERNAL_SERVER_ERROR,
            Json(json!({ "error": "Internal server error." }))
        ))?;
        .map_err(|_| {
            // If there's an error verifying the password, return an internal server error.
            (
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(json!({ "error": "Internal server error." }))
            )
        })?;

    // Determine if the credentials are valid based on API keys or password.
    let credentials_valid = any_api_key_valid || password_valid;

    if !credentials_valid {
        // If credentials are not valid, return an unauthorized response.
        return Err((
            StatusCode::UNAUTHORIZED,
            Json(json!({ "error": "Incorrect credentials." }))
        ));
    }

    // Check TOTP if it's set up for the user
    // Check TOTP if it's set up for the user.
    if let Some(totp_secret) = user.totp_secret {
        match user_data.totp {
            Some(totp_code) => {
                // Create a TOTP instance with the user's secret.
                let totp = TOTP::new(
                    Algorithm::SHA512,
                    8,
                    1,
                    30,
                    totp_secret.into_bytes(),
                ).map_err(|_| (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Json(json!({ "error": "Internal server error." }))
                ))?;
                ).map_err(|_| {
                    // If there's an error creating the TOTP instance, return an internal server error.
                    (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        Json(json!({ "error": "Internal server error." }))
                    )
                })?;

                // Check if the provided TOTP code is valid.
                if !totp.check_current(&totp_code).unwrap_or(false) {
                    // If the TOTP code is invalid, return an unauthorized response.
                    return Err((
                        StatusCode::UNAUTHORIZED,
                        Json(json!({ "error": "Invalid 2FA code." }))
                    ));
                }
            },
            None => return Err((
                StatusCode::BAD_REQUEST,
                Json(json!({ "error": "2FA code required for this account." }))
            )),
            None => {
                // If TOTP is set up but no code is provided, return a bad request.
                return Err((
                    StatusCode::BAD_REQUEST,
                    Json(json!({ "error": "2FA code required for this account." }))
                ));
            }
        }
    }

    // Generate a JWT token for the user.
    let email = user.email.clone();
    let token = encode_jwt(user.email)
        .map_err(|_| (
            StatusCode::INTERNAL_SERVER_ERROR,
            Json(json!({ "error": "Internal server error." }))
        ))?;
        .map_err(|_| {
            // If there's an error generating the JWT, return an internal server error.
            (
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(json!({ "error": "Internal server error." }))
            )
        })?;

    // Log the successful sign-in.
    info!("User signed in: {}", email);

    // Return the JWT token in a JSON response.
    Ok(Json(json!({ "token": token })))
}
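For reference, the handler above expects a JSON body that deserializes into SignInData and, on success, returns a body of the form { "token": "<JWT>" }; failures come back as { "error": "..." } with status 401, 400, or 500. A hedged client-side sketch using reqwest with its json feature (the /signin path, base URL, and credentials are illustrative assumptions; only the default port 3000 comes from the compose file):

use serde_json::{json, Value};

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // Payload matching the SignInData fields; `totp` may be null or omitted
    // when 2FA is not enabled for the account.
    let body = json!({
        "email": "user@example.com",
        "password": "hunter2",
        "totp": null
    });

    // The "/signin" route and host are assumptions for illustration.
    let resp: Value = reqwest::Client::new()
        .post("http://localhost:3000/signin")
        .json(&body)
        .send()
        .await?
        .json()
        .await?;

    // On success this prints {"token": "<JWT>"}; on failure an {"error": "..."} body.
    println!("{resp}");
    Ok(())
}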
@@ -39,3 +39,17 @@ pub struct AuthError {

// Implement Error trait for AuthError if needed
// impl std::error::Error for AuthError {}

/// Data structure for user sign-in information.
///
/// This includes the user's email, password, and optionally a TOTP code.
#[derive(Deserialize, ToSchema)]
pub struct SignInData {
    /// User's email address.
    pub email: String,
    /// User's password.
    pub password: String,
    /// Optional TOTP code for two-factor authentication.
    pub totp: Option<String>,
}
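When the optional `totp` field is supplied, the sign-in handler verifies it with TOTP::new(Algorithm::SHA512, 8, 1, 30, totp_secret.into_bytes()) and check_current, i.e. an 8-digit SHA-512 code over a 30-second step. A sketch of producing a matching code on the client side, assuming the totp-rs 5.x crate (the secret literal here is a placeholder; in practice it is the user's stored totp_secret):

use totp_rs::{Algorithm, TOTP};

fn main() {
    // Same parameters as the server side: SHA-512, 8 digits, skew 1, 30 s step.
    // The secret is a placeholder; totp-rs requires at least a 128-bit secret.
    let secret = b"an-example-shared-secret".to_vec();
    let totp = TOTP::new(Algorithm::SHA512, 8, 1, 30, secret)
        .expect("valid TOTP parameters");

    // Current code to send as the `totp` field of the sign-in request.
    let code = totp.generate_current().expect("system time available");
    println!("{code}");
}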