feat: update dependencies

Kristofers Solo 2025-02-11 10:01:39 +02:00
parent a35e6e79dd
commit 60de42307f
23 changed files with 3627 additions and 201 deletions

Cargo.lock (generated, new file, 3114 lines)
File diff suppressed because it is too large.

Cargo.toml

@@ -2,7 +2,7 @@
 name = "{{project-name}}"
 version = "0.1.0"
 edition = "2021"
-authors = ["Kristofers Solo <dev@kristofers.xyz>"]
+authors = ["{{authors}}"]
 [lib]
 path = "src/lib.rs"
@@ -12,51 +12,48 @@ path = "src/main.rs"
 name = "{{project-name}}"
 [dependencies]
-axum = "0.7"
+axum = "0.8"
 chrono = { version = "0.4", features = ["serde", "clock"] }
-config = { version = "0.14", features = ["toml"], default-features = false }
+config = { version = "0.15", features = ["toml"], default-features = false }
 serde = { version = "1", features = ["derive"] }
 sqlx = { version = "0.8", default-features = false, features = [
     "runtime-tokio",
     "tls-rustls",
     "macros",
     "postgres",
     "uuid",
     "chrono",
     "migrate",
 ] }
 tokio = { version = "1.39", features = [
     "rt",
     "macros",
     "tracing",
     "rt-multi-thread",
 ] }
 uuid = { version = "1.8", features = ["v4", "serde"] }
 tracing = { version = "0.1", features = ["log"] }
 tracing-subscriber = { version = "0.3", features = ["registry", "env-filter"] }
-tower-http = { version = "0.5", features = ["trace"] }
+tower-http = { version = "0.6", features = ["trace"] }
 tracing-bunyan-formatter = "0.3"
 tracing-log = "0.2"
-secrecy = { version = "0.8", features = ["serde"] }
+secrecy = { version = "0.10", features = ["serde"] }
 serde-aux = "4"
 reqwest = { version = "0.12", default-features = false, features = [
     "json",
     "rustls-tls",
 ] }
 [dev-dependencies]
 once_cell = "1.19"
-fake = "2.9"
+fake = "3.1"
 quickcheck = "1.0"
 quickcheck_macros = "1.0"
 wiremock = "0.6"
 serde_json = "1"
-[package.metadata.clippy]
-warn = [
-    "clippy::pedantic",
-    "clippy::nursery",
-    "clippy::unwrap_used",
-    "clippy::expect_used",
-]
+[lints.clippy]
+pedantic = "warn"
+nursery = "warn"
+unwrap_used = "warn"
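A note on the lint migration above: `[package.metadata.clippy]` is an arbitrary metadata table that Clippy does not act on, whereas `[lints.clippy]` is applied by Cargo itself (stable since Cargo 1.74). Crate-wide levels can still be overridden locally; a minimal, purely illustrative sketch (the function below is not part of the template):

// Illustrative only: opt a single item out of the crate-wide `unwrap_used` warning.
#[allow(clippy::unwrap_used)]
fn port_from_validated_input(raw: &str) -> u16 {
    // Assumed precondition (hypothetical): `raw` was validated upstream.
    raw.parse().unwrap()
}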

justfile (new file)

@@ -0,0 +1,83 @@
set dotenv-load

PROJECT_NAME := "{{project-name}}"

# List all available commands
default:
    @just --list

# Format code
fmt:
    cargo fmt

# Lint code
lint:
    cargo clippy -- -D warnings

# Run tests
test:
    cargo test

# Build the application (debug)
build:
    cargo build

# Build the application (release)
build-release:
    cargo build --release

# Run the application (debug)
run:
    cargo run

# Run the application (release)
run-release:
    cargo run --release

# Run migrations
migrate:
    cargo sqlx migrate run

# Revert migrations
migrate-revert:
    cargo sqlx migrate revert

# Create a new migration
migrate-create name:
    cargo sqlx migrate add $(name)

# Check migrations
migrate-status:
    cargo sqlx migrate status

# Watch for changes and run tests/linting/run (for development)
dev:
    cargo watch -x clippy -x test -x run | bunyan

# Build, migrate, and run (release)
deploy:
    just build-release
    just migrate
    just run-release

# Generate documentation
doc:
    cargo doc --open

# Clean the project
clean:
    cargo clean

# Analyze binary size
analyze-size:
    cargo build --release
    cargo install cargo-bloat
    cargo bloat --release --all-features --crates

# Check dependencies for security vulnerabilities
audit:
    cargo audit

# Check for outdated dependencies
outdated:
    cargo outdated

scripts/init_db (mode changed: executable file → normal file; 0 lines changed)

src/config.rs (deleted)

@@ -1,108 +0,0 @@
use std::fmt::Display;
use secrecy::{ExposeSecret, Secret};
use serde::Deserialize;
use serde_aux::field_attributes::deserialize_number_from_string;
use sqlx::{
postgres::{PgConnectOptions, PgSslMode},
ConnectOptions,
};
#[derive(Debug, Deserialize)]
pub struct Settings {
pub database: DatabaseSettings,
pub application: ApplicationSettings,
}
#[derive(Debug, Deserialize)]
pub struct DatabaseSettings {
pub username: String,
pub password: Secret<String>,
#[serde(deserialize_with = "deserialize_number_from_string")]
pub port: u16,
pub host: String,
pub database_name: String,
pub require_ssl: bool,
}
#[derive(Debug, Deserialize)]
pub struct ApplicationSettings {
#[serde(deserialize_with = "deserialize_number_from_string")]
pub port: u16,
pub host: String,
}
#[derive(Debug)]
pub enum Environment {
Local,
Production,
}
pub fn get_config() -> Result<Settings, config::ConfigError> {
let base_path = std::env::current_dir().expect("Failed to determine current directory");
let config_directory = base_path.join("config");
let env: Environment = std::env::var("APP_ENVIRONMENT")
.unwrap_or_else(|_| "local".into())
.try_into()
.expect("Failed to parse APP_ENVIRONMENT");
let env_filename = format!("{}.toml", &env);
let settings = config::Config::builder()
.add_source(config::File::from(config_directory.join("base.toml")))
.add_source(config::File::from(config_directory.join(env_filename)))
.add_source(
config::Environment::with_prefix("APP")
.prefix_separator("_")
.separator("__"),
)
.build()?;
settings.try_deserialize::<Settings>()
}
impl DatabaseSettings {
pub fn without_db(&self) -> PgConnectOptions {
let ssl_mode = if self.require_ssl {
PgSslMode::Require
} else {
PgSslMode::Prefer
};
PgConnectOptions::new()
.host(&self.host)
.username(&self.username)
.password(self.password.expose_secret())
.port(self.port)
.ssl_mode(ssl_mode)
}
pub fn with_db(&self) -> PgConnectOptions {
self.without_db()
.database(&self.database_name)
.log_statements(tracing_log::log::LevelFilter::Trace)
}
}
impl Display for Environment {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Environment::Local => write!(f, "local"),
Environment::Production => write!(f, "production"),
}
}
}
impl TryFrom<String> for Environment {
type Error = String;
fn try_from(value: String) -> Result<Self, Self::Error> {
match value.to_lowercase().as_str() {
"local" => Ok(Self::Local),
"production" => Ok(Self::Production),
other => Err(format!(
"{} is not supported environment. \
Use either `local` or `production`.",
other
)),
}
}
}

src/config/application.rs (new file)

@@ -0,0 +1,9 @@
use serde::Deserialize;
use serde_aux::field_attributes::deserialize_number_from_string;

#[derive(Debug, Clone, Deserialize)]
pub struct ApplicationSettings {
    #[serde(deserialize_with = "deserialize_number_from_string")]
    pub port: u16,
    pub host: String,
}

src/config/database.rs (new file)

@@ -0,0 +1,43 @@
use secrecy::{ExposeSecret, SecretString};
use serde::Deserialize;
use serde_aux::field_attributes::deserialize_number_from_string;
use sqlx::{
    postgres::{PgConnectOptions, PgSslMode},
    ConnectOptions,
};

#[derive(Debug, Clone, Deserialize)]
pub struct DatabaseSettings {
    pub username: String,
    pub password: SecretString,
    #[serde(deserialize_with = "deserialize_number_from_string")]
    pub port: u16,
    pub host: String,
    pub database_name: String,
    pub require_ssl: bool,
}

impl DatabaseSettings {
    #[must_use]
    pub fn without_db(&self) -> PgConnectOptions {
        let ssl_mode = if self.require_ssl {
            PgSslMode::Require
        } else {
            PgSslMode::Prefer
        };
        PgConnectOptions::new()
            .host(&self.host)
            .username(&self.username)
            .password(self.password.expose_secret())
            .port(self.port)
            .ssl_mode(ssl_mode)
    }

    #[must_use]
    pub fn with_db(&self) -> PgConnectOptions {
        self.without_db()
            .database(&self.database_name)
            .log_statements(tracing_log::log::LevelFilter::Trace)
    }
}
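As a usage sketch (not part of the commit): the options built by `with_db()` can also feed an eagerly connected pool; the template's `startup.rs` below uses `connect_lazy_with` instead, and the helper name plus the `max_connections` value here are assumptions.

use crate::config::DatabaseSettings;
use sqlx::{postgres::PgPoolOptions, PgPool};

// Hypothetical eager-connection helper built on `DatabaseSettings::with_db()`.
async fn connect_eagerly(settings: &DatabaseSettings) -> Result<PgPool, sqlx::Error> {
    PgPoolOptions::new()
        .max_connections(5) // assumed value, not taken from the template
        .connect_with(settings.with_db())
        .await
}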

src/config/environment.rs (new file)

@@ -0,0 +1,38 @@
use std::{fmt::Display, str::FromStr};

#[derive(Debug)]
pub enum Environment {
    Local,
    Production,
}

impl Display for Environment {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let s = match self {
            Self::Local => "local",
            Self::Production => "production",
        };
        write!(f, "{s}")
    }
}

impl TryFrom<String> for Environment {
    type Error = String;

    fn try_from(value: String) -> Result<Self, Self::Error> {
        Self::from_str(&value)
    }
}

impl FromStr for Environment {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "local" => Ok(Self::Local),
            "production" => Ok(Self::Production),
            other => Err(format!(
                "{other} is not a supported environment. \
                 Use either `local` or `production`.",
            )),
        }
    }
}
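A small usage sketch for the conversions above (not part of the commit; `pick_env` is an illustrative name):

use crate::config::environment::Environment;
use std::str::FromStr;

fn pick_env() {
    // Parsing is case-insensitive thanks to `to_lowercase()` in `from_str`.
    let env = Environment::from_str("LOCAL").expect("`local` should parse");
    assert_eq!(env.to_string(), "local");
    // Anything else yields the descriptive error string built in `from_str`.
    assert!(Environment::from_str("staging").is_err());
}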

src/config/mod.rs (new file)

@@ -0,0 +1,53 @@
pub mod application;
pub mod database;
pub mod environment;

use application::ApplicationSettings;
pub use database::DatabaseSettings;
use environment::Environment;
use serde::Deserialize;

#[derive(Debug, Clone, Deserialize)]
pub struct Settings {
    pub database: DatabaseSettings,
    pub application: ApplicationSettings,
}

/// Get the configuration settings for the application.
///
/// # Panics
///
/// This function may panic in the following cases:
///
/// - If the current directory cannot be determined. This is highly unusual.
/// - If the `APP_ENVIRONMENT` environment variable is set to an invalid value
///   that cannot be converted to an `Environment` enum variant.
///
/// # Errors
///
/// This function returns an error if:
///
/// - Any of the configuration files (`base.toml`, `{environment}.toml`) cannot be read or parsed.
/// - Environment variables prefixed with `APP_` cannot be read or parsed.
/// - The resulting configuration cannot be deserialized into the `Settings` struct.
pub fn get_config() -> Result<Settings, config::ConfigError> {
    let base_path = std::env::current_dir().expect("Failed to determine current directory");
    let config_directory = base_path.join("config");
    let env: Environment = std::env::var("APP_ENVIRONMENT")
        .unwrap_or_else(|_| "local".into())
        .try_into()
        .expect("Failed to parse APP_ENVIRONMENT");
    let env_filename = format!("{}.toml", &env);
    let settings = config::Config::builder()
        .add_source(config::File::from(config_directory.join("base.toml")))
        .add_source(config::File::from(config_directory.join(env_filename)))
        .add_source(
            config::Environment::with_prefix("APP")
                .prefix_separator("_")
                .separator("__"),
        )
        .build()?;
    settings.try_deserialize::<Settings>()
}
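To make the layering concrete: `config/base.toml` is read first, then `config/{APP_ENVIRONMENT}.toml`, and finally `APP_*` environment variables, where `__` separates nested fields. A hedged sketch (the port value and function name are assumptions, not from the diff):

use crate::config::get_config;

fn config_override_example() {
    // Selects `config/local.toml` as the overlay; "local" is also the default.
    std::env::set_var("APP_ENVIRONMENT", "local");
    // `APP` prefix + `_` prefix separator + `__` field separator:
    // this variable maps to `settings.application.port` and overrides both TOML files.
    std::env::set_var("APP_APPLICATION__PORT", "8080");
    let settings = get_config().expect("configuration should load");
    assert_eq!(settings.application.port, 8080);
}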

src/domain/mod.rs

@@ -1 +1,24 @@
//! # Domain
//!
//! This module defines the core business logic and data structures of the application,
//! independent of any specific implementation details such as databases or external APIs.
//!
//! It contains value objects, entities, and domain services that represent the fundamental
//! concepts and rules of the application's problem domain.
//!
//! Example:
//!
//! ```
//! // A value object representing an email address
//! #[derive(Debug, Clone)]
//! pub struct EmailAddress(pub String);
//!
//! // An entity representing a User
//! pub struct User {
//! pub id: UserId,
//! pub email: EmailAddress,
//! pub name: String,
//! }
//!
//! pub struct UserId(pub String);
//! ```

src/errors/mod.rs (new file, empty)

src/lib.rs

@@ -1,4 +1,9 @@
 pub mod config;
 pub mod domain;
+pub mod errors;
+pub mod middleware;
+pub mod models;
+pub mod repositories;
 pub mod routes;
-pub mod telemetry;
+pub mod services;
+pub mod startup;

src/main.rs

@@ -1,21 +1,15 @@
 use {{crate_name}}::{
     config::get_config,
-    routes::route,
-    telemetry::{get_subscriber, init_subscriber},
+    middleware::telemetry::{get_subscriber, init_subscriber},
+    startup::Application,
 };
-use sqlx::postgres::PgPoolOptions;
-use tokio::net::TcpListener;
 #[tokio::main]
 async fn main() -> Result<(), std::io::Error> {
-    let subscriber = get_subscriber("{{project-name}}", "info", std::io::stdout);
+    let subscriber = get_subscriber("{{crate_name}}", "info", std::io::stdout);
     init_subscriber(subscriber);
     let config = get_config().expect("Failed to read configuation.");
-    let pool = PgPoolOptions::new().connect_lazy_with(config.database.with_db());
-    let addr = format!("{}:{}", config.application.host, config.application.port);
-    let listener = TcpListener::bind(addr)
-        .await
-        .expect("Failed to bind port 8000.");
-    axum::serve(listener, route(pool)).await
+    let application = Application::build(config).await?;
+    application.run_until_stopped().await?;
+    Ok(())
 }

src/middleware/mod.rs (new file)

@@ -0,0 +1 @@
pub mod telemetry;

src/middleware/telemetry.rs

@@ -3,6 +3,12 @@ use tracing_bunyan_formatter::{BunyanFormattingLayer, JsonStorageLayer};
 use tracing_log::LogTracer;
 use tracing_subscriber::{fmt::MakeWriter, layer::SubscriberExt, EnvFilter, Registry};
+/// Create a new tracing subscriber.
+///
+/// # Panics
+///
+/// This function may panic if there is a bug in the `EnvFilter::from` implementation,
+/// causing the `env_filter.into()` conversion to fail. This is highly unlikely.
 pub fn get_subscriber<Sink>(
     name: &str,
     env_filter: &str,
@@ -19,6 +25,16 @@ where
         .with(formatting_layer)
 }
+/// Initialize a global subscriber for tracing and logging.
+///
+/// # Panics
+///
+/// This function may panic in the following cases:
+///
+/// - If `LogTracer::init()` fails because the global logger has already been initialized.
+///   This typically happens if `init_subscriber` is called more than once.
+/// - If `set_global_default(subscriber)` fails because another subscriber has already been set,
+///   or if there's an issue with the provided subscriber.
 pub fn init_subscriber(subscriber: impl Subscriber + Sync + Send) {
     LogTracer::init().expect("Failed to set logger");
     set_global_default(subscriber).expect("Failed to set subscriber.");

src/models/mod.rs (new file)

@@ -0,0 +1,20 @@
//! # Models
//!
//! This module defines the data structures (models or entities) that represent the core concepts
//! of the application's domain. These models are database-agnostic and primarily focus on
//! data representation.
//!
//! They are used to define the shape of the data as it exists within the application,
//! independent of how it's stored or retrieved.
//!
//! Example:
//!
//! ```
//! // A simple model for a Subscriber
//! #[derive(Debug, Clone)]
//! pub struct Subscriber {
//! pub id: i32,
//! pub email: String,
//! pub name: String,
//! }
//! ```

src/repositories/mod.rs (new file)

@@ -0,0 +1,25 @@
//! # Repositories
//!
//! This module provides an abstraction layer for data access. Repositories encapsulate the
//! logic for interacting with the database, providing a consistent interface for creating,
//! reading, updating, and deleting data.
//!
//! They are responsible for translating between the application's domain models and the
//! database's data structures. The database connection pool is passed as a parameter to
//! the repository methods.
//!
//! Example:
//!
//! ```rust
//! use sqlx::PgPool;
//! use sqlx::Error;
//!
//! // A repository for managing Subscribers
//! #[derive(Debug, Clone, Copy)]
//! pub struct SubscriberRepository;
//!
//! pub async fn create_subscriber(pool: &PgPool, email: &str, name: &str) -> Result<(), Error> {
//! // ... database interaction logic using the pool
//! Ok(())
//! }
//! ```

src/routes/mod.rs

@@ -1,26 +1,25 @@
 mod health_check;
-use std::time::Duration;
+use axum::{routing::get, Router};
+use crate::startup::AppState;
 use axum::{
     body::Bytes,
     extract::MatchedPath,
     http::{HeaderMap, Request},
     response::Response,
-    routing::get,
-    Router,
 };
 pub use health_check::*;
-use sqlx::PgPool;
-use tower_http::{classify::ServerErrorsFailureClass, trace::TraceLayer};
-use tracing::{info_span, Span};
+use std::time::Duration;
+use tower_http::classify::ServerErrorsFailureClass;
+use tower_http::trace::TraceLayer;
+use tracing::{info, info_span, Span};
 use uuid::Uuid;
-pub fn route(pool: PgPool) -> Router {
+pub fn route(state: AppState) -> Router {
     Router::new()
         .route("/health_check", get(health_check))
-        .with_state(pool)
+        .with_state(state)
         .layer(
             TraceLayer::new_for_http()
                 .make_span_with(|request: &Request<_>| {
@@ -31,13 +30,29 @@ pub fn route(pool: PgPool) -> Router {
                     info_span!(
                         "http-request",
                         method = ?request.method(),
+                        uri = %request.uri(),
                         matched_path,
-                        some_other_field = tracing::field::Empty,
                         request_id=%Uuid::new_v4(),
                     )
                 })
-                .on_request(|_request: &Request<_>, _span: &Span| {})
-                .on_response(|_response: &Response<_>, _latency: Duration, _span: &Span| {})
+                .on_request(|request: &Request<_>, span: &Span| {
+                    info!(
+                        target: "http_requests",
+                        parent: span,
+                        method = ?request.method(),
+                        uri = %request.uri(),
+                        "Incoming request"
+                    );
+                })
+                .on_response(|response: &Response<_>, latency: Duration, span: &Span| {
+                    info!(
+                        target: "http_responses",
+                        parent: span,
+                        status = response.status().as_u16(),
+                        latency = ?latency,
+                        "Outgoing response"
+                    );
+                })
                 .on_body_chunk(|_chunk: &Bytes, _latency: Duration, _span: &Span| {})
                 .on_eos(
                     |_trailers: Option<&HeaderMap>, _stream_duration: Duration, _span: &Span| {},
@@ -46,4 +61,5 @@ pub fn route(pool: PgPool) -> Router {
                     |_error: ServerErrorsFailureClass, _latency: Duration, _span: &Span| {},
                 ),
         )
+        // .layer(create_telemetry_layer())
 }

src/services/mod.rs (new file)

@@ -0,0 +1,25 @@
//! # Services
//!
//! This module contains the business logic of the application. Services orchestrate the
//! interaction between models, repositories, and other components to fulfill specific use
//! cases.
//!
//! They define the core operations that the application performs and are responsible for
//! enforcing business rules and ensuring data consistency.
//!
//! Example:
//!
//! ```
//! // A service for managing newsletter subscriptions
//! pub struct NewsletterService {
//! // ... dependencies (e.g., SubscriberRepository, EmailClient)
//! }
//!
//! impl NewsletterService {
//! // Method to subscribe a user to the newsletter
//! pub async fn subscribe_user(&self, email: &str, name: &str) -> Result<(), String> {
//! // ... business logic (e.g., validate email, create subscriber, send confirmation email)
//! Ok(())
//! }
//! }
//! ```

src/startup.rs (new file)

@@ -0,0 +1,69 @@
use sqlx::{postgres::PgPoolOptions, PgPool};
use std::sync::Arc;
use tokio::{net::TcpListener, task::JoinHandle};

use crate::{
    config::{DatabaseSettings, Settings},
    routes::route,
};

pub struct App {
    pub pool: PgPool,
}

pub type AppState = Arc<App>;

pub struct Application {
    port: u16,
    server: JoinHandle<Result<(), std::io::Error>>,
}

impl Application {
    /// Builds and starts the application server.
    ///
    /// # Errors
    ///
    /// Returns `std::io::Error` if it fails to bind to the specified address.
    ///
    /// # Panics
    ///
    /// Panics if `listener.local_addr()` returns an error. This should only occur if the
    /// listener is not properly bound to an address, which is considered a critical
    /// failure during application startup.
    pub async fn build(config: Settings) -> Result<Self, std::io::Error> {
        let pool = get_connection_pool(&config.database);
        let addr = format!("{}:{}", config.application.host, config.application.port);
        let listener = TcpListener::bind(addr).await?;
        let port = listener
            .local_addr()
            .expect("Listener should have a local address")
            .port();
        let app_state = App { pool }.into();
        let server = tokio::spawn(async move { axum::serve(listener, route(app_state)).await });
        Ok(Self { port, server })
    }

    #[must_use]
    #[inline]
    pub const fn port(&self) -> u16 {
        self.port
    }

    /// Runs the application until it is stopped.
    ///
    /// # Errors
    ///
    /// Returns `std::io::Error` if the server task encounters an error.
    #[inline]
    pub async fn run_until_stopped(self) -> Result<(), std::io::Error> {
        self.server.await?
    }
}

#[must_use]
pub fn get_connection_pool(config: &DatabaseSettings) -> PgPool {
    PgPoolOptions::new().connect_lazy_with(config.with_db())
}
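For orientation (not part of the commit): handlers reach the shared `PgPool` through the `AppState` alias via axum's `State` extractor. The `db_ping` handler and its query below are illustrative; the template's real `health_check` handler is not shown in this diff.

use crate::startup::AppState;
use axum::extract::State;

// Illustrative handler borrowing the pool that `get_connection_pool` stored in `App`.
async fn db_ping(State(state): State<AppState>) -> &'static str {
    match sqlx::query("SELECT 1").execute(&state.pool).await {
        Ok(_) => "ok",
        Err(_) => "database unreachable",
    }
}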

tests/api/health_check.rs (new file)

@@ -0,0 +1,17 @@
use crate::helpers::spawn_app;
use reqwest::Client;

#[tokio::test]
async fn health_check() {
    let app = spawn_app().await;
    let url = format!("{}/health_check", &app.address);
    let client = Client::new();

    let response = client
        .get(&url)
        .send()
        .await
        .expect("Failed to execute request");

    assert!(response.status().is_success());
    assert_eq!(Some(0), response.content_length());
}

tests/api/helpers.rs

@@ -1,29 +1,12 @@
 use {{crate_name}}::{
     config::{get_config, DatabaseSettings},
-    routes::route,
-    telemetry::{get_subscriber, init_subscriber},
+    middleware::telemetry::{get_subscriber, init_subscriber},
+    startup::{get_connection_pool, Application},
 };
 use once_cell::sync::Lazy;
-use reqwest::Client;
 use sqlx::{Connection, Executor, PgConnection, PgPool};
-use tokio::net::TcpListener;
 use uuid::Uuid;
-#[tokio::test]
-async fn health_check() {
-    let app = spawn_app().await;
-    let url = format!("{}/health_check", &app.address);
-    let client = Client::new();
-    let response = client
-        .get(&url)
-        .send()
-        .await
-        .expect("Failed to execute request");
-    assert!(response.status().is_success());
-    assert_eq!(Some(0), response.content_length());
-}
 static TRACING: Lazy<()> = Lazy::new(|| {
     let default_filter_level = "trace";
     let subscriber_name = "test";
@@ -36,27 +19,33 @@ static TRACING: Lazy<()> = Lazy::new(|| {
     }
 });
-async fn spawn_app() -> TestApp {
+pub struct TestApp {
+    pub address: String,
+    pub pool: PgPool,
+}
+pub async fn spawn_app() -> TestApp {
     Lazy::force(&TRACING);
-    let listener = TcpListener::bind("127.0.0.1:0")
-        .await
-        .expect("Failed to bind random port");
-    let port = listener.local_addr().unwrap().port();
-    let address = format!("http://127.0.0.1:{}", port);
-    let mut config = get_config().expect("Failed to read configuration.");
-    config.database.database_name = Uuid::new_v4().to_string();
-    let pool = configure_database(&config.database).await;
-    let pool_clone = pool.clone();
-    let _ = tokio::spawn(async move {
-        axum::serve(listener, route(pool_clone))
-            .await
-            .expect("Failed to bind address.")
-    });
-    TestApp { address, pool }
+    let config = {
+        let mut c = get_config().expect("Failed to read configuration.");
+        c.database.database_name = Uuid::new_v4().to_string();
+        c.application.port = 0;
+        c
+    };
+    configure_database(&config.database).await;
+    let application = Application::build(config.clone())
+        .await
+        .expect("Failed to build application.");
+    let address = format!("http://127.0.0.1:{}", application.port());
+    let _ = tokio::spawn(application.run_until_stopped());
+    TestApp {
+        address,
+        pool: get_connection_pool(&config.database),
+    }
 }
@@ -88,8 +77,3 @@ async fn configure_database(config: &DatabaseSettings) -> PgPool {
     pool
 }
-pub struct TestApp {
-    pub address: String,
-    pub pool: PgPool,
-}

tests/api/main.rs (new file)

@@ -0,0 +1,2 @@
mod health_check;
mod helpers;
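
A hypothetical follow-on test module (say, `tests/api/subscriptions.rs`, declared alongside the two modules above) showing why `TestApp` exposes `pool`; the query and test name are illustrative only.

use crate::helpers::spawn_app;

#[tokio::test]
async fn spawned_app_exposes_a_usable_pool() {
    let app = spawn_app().await;
    // The pool targets the per-test database created by `configure_database`.
    let row: (i32,) = sqlx::query_as("SELECT 1")
        .fetch_one(&app.pool)
        .await
        .expect("query against the test database should succeed");
    assert_eq!(row.0, 1);
}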