From 9b77a8dd78942ab1beb05d1804ecff16730f8901 Mon Sep 17 00:00:00 2001 From: phoenix Date: Mon, 31 Mar 2025 19:16:15 -0400 Subject: [PATCH 01/10] Separated the code --- src/callers/common.rs | 4 ++++ src/callers/mod.rs | 7 +++++++ src/callers/register.rs | 12 ++++++++++++ src/config/mod.rs | 10 ++++++++++ src/main.rs | 38 ++++++++++++-------------------------- src/models/common.rs | 11 +++++++++++ src/models/mod.rs | 1 + 7 files changed, 57 insertions(+), 26 deletions(-) create mode 100644 src/callers/common.rs create mode 100644 src/callers/mod.rs create mode 100644 src/callers/register.rs create mode 100644 src/config/mod.rs create mode 100644 src/models/common.rs create mode 100644 src/models/mod.rs diff --git a/src/callers/common.rs b/src/callers/common.rs new file mode 100644 index 0000000..c39654b --- /dev/null +++ b/src/callers/common.rs @@ -0,0 +1,4 @@ +// basic handler that responds with a static string +pub async fn root() -> &'static str { + "Hello, World!" +} diff --git a/src/callers/mod.rs b/src/callers/mod.rs new file mode 100644 index 0000000..79b9089 --- /dev/null +++ b/src/callers/mod.rs @@ -0,0 +1,7 @@ +pub mod common; +pub mod register; + +pub mod endpoints { + pub const ROOT: &str = "/"; + pub const REGISTER: &str = "api/v2/register"; +} diff --git a/src/callers/register.rs b/src/callers/register.rs new file mode 100644 index 0000000..7a1299a --- /dev/null +++ b/src/callers/register.rs @@ -0,0 +1,12 @@ +use axum::{Json, http::StatusCode}; + +use crate::models; + +pub async fn register_user( + Json(payload): Json, +) -> (StatusCode, Json) { + let user = models::common::User { + username: payload.username.clone(), + }; + (StatusCode::CREATED, Json(user)) +} diff --git a/src/config/mod.rs b/src/config/mod.rs new file mode 100644 index 0000000..f34b535 --- /dev/null +++ b/src/config/mod.rs @@ -0,0 +1,10 @@ +pub fn get_full() -> String { + get_address() + ":" + &get_port() +} +fn get_address() -> String { + String::from("0.0.0.0") +} + +fn get_port() -> String { + String::from("3000") +} diff --git a/src/main.rs b/src/main.rs index 6a44cd6..a18c999 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,11 +1,11 @@ use axum::{ - // Json, Router, - // http::StatusCode, - routing::get, - // routing::{get, post}, + routing::{get, post}, }; -// use serde::{Deserialize, Serialize}; + +pub mod callers; +pub mod config; +pub mod models; #[tokio::main] async fn main() { @@ -14,28 +14,14 @@ async fn main() { // build our application with a route let app = Router::new() - // `GET /` goes to `root` - .route("/", get(root)); - // `POST /users` goes to `create_user` - // .route("/users", post(create_user)); + .route(callers::endpoints::ROOT, get(callers::common::root)) + .route( + callers::endpoints::REGISTER, + post(callers::register::register_user), + ); // run our app with hyper, listening globally on port 3000 - let listener = tokio::net::TcpListener::bind(get_full()).await.unwrap(); + let url = config::get_full(); + let listener = tokio::net::TcpListener::bind(url).await.unwrap(); axum::serve(listener, app).await.unwrap(); } - -fn get_full() -> String { - get_address() + ":" + &get_port() -} -fn get_address() -> String { - String::from("0.0.0.0") -} - -fn get_port() -> String { - String::from("3000") -} - -// basic handler that responds with a static string -async fn root() -> &'static str { - "Hello, World!" 
-} diff --git a/src/models/common.rs b/src/models/common.rs new file mode 100644 index 0000000..cda15f2 --- /dev/null +++ b/src/models/common.rs @@ -0,0 +1,11 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Deserialize)] +pub struct CreateUser { + pub username: String, +} + +#[derive(Serialize)] +pub struct User { + pub username: String, +} diff --git a/src/models/mod.rs b/src/models/mod.rs new file mode 100644 index 0000000..34994bf --- /dev/null +++ b/src/models/mod.rs @@ -0,0 +1 @@ +pub mod common; From c9873d95d761fbec4f1e78f34f8178d19f4222ce Mon Sep 17 00:00:00 2001 From: phoenix Date: Tue, 1 Apr 2025 00:29:09 +0000 Subject: [PATCH 02/10] Added test (#8) Reviewed-on: https://git.kundeng.us/phoenix/icarus_auth/pulls/8 Co-authored-by: phoenix Co-committed-by: phoenix --- Cargo.toml | 4 +++- src/lib.rs | 3 +++ src/main.rs | 5 ++--- tests/auth_tests.rs | 42 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 50 insertions(+), 4 deletions(-) create mode 100644 src/lib.rs create mode 100644 tests/auth_tests.rs diff --git a/Cargo.toml b/Cargo.toml index d27538f..ce7a653 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,5 +8,7 @@ axum = { version = "0.8.3" } serde = { version = "1.0.218", features = ["derive"] } serde_json = { version = "1.0.139" } tokio = { version = "1.44.1", features = ["rt-multi-thread"] } -tracing-subscriber = "0.3.19" +tracing-subscriber = { version = "0.3.19" } +tower = { version = "0.5.2" } +hyper = { version = "1.6.0" } icarus_models = { git = "ssh://git@git.kundeng.us/phoenix/icarus_models.git", tag = "v0.2.0" } diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 0000000..231cc1e --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,3 @@ +pub mod callers; +pub mod config; +pub mod models; diff --git a/src/main.rs b/src/main.rs index a18c999..036d622 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,9 +3,8 @@ use axum::{ routing::{get, post}, }; -pub mod callers; -pub mod config; -pub mod models; +use icarus_auth::callers; +use icarus_auth::config; #[tokio::main] async fn main() { diff --git a/tests/auth_tests.rs b/tests/auth_tests.rs new file mode 100644 index 0000000..19e08fe --- /dev/null +++ b/tests/auth_tests.rs @@ -0,0 +1,42 @@ +extern crate icarus_auth; + +use axum::body::Body; +// use axum::response::Response; +use axum::{ + Router, + http::{Request, StatusCode}, + routing::get, +}; +// use http::{Request, StatusCode}; +// use serde_json::json; +// use tower::ServiceExt; // for `.oneshot()` +use tower::util::ServiceExt; + +use crate::icarus_auth::callers; + +#[tokio::test] +async fn test_hello_world() { + let app = Router::new().route(callers::endpoints::ROOT, get(callers::common::root)); // Replace with your handler + + let response = app + .oneshot( + Request::builder() + .uri(callers::endpoints::ROOT) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), StatusCode::OK); + + let body = String::from_utf8( + axum::body::to_bytes(response.into_body(), usize::MAX) + .await + .unwrap() + .to_vec(), + ) + .unwrap(); + + assert_eq!(body, "Hello, World!"); +} From 4d3415acf2c1d57ee870f04737c1368a4ddd2598 Mon Sep 17 00:00:00 2001 From: KD Date: Thu, 3 Apr 2025 13:59:54 +0000 Subject: [PATCH 03/10] Added config file for db (#9) Reviewed-on: https://git.kundeng.us/phoenix/icarus_auth/pulls/9 Co-authored-by: KD Co-committed-by: KD --- .env.sample | 2 + .gitea/workflows/workflow.yml | 14 +++ .gitignore | 1 + Cargo.toml | 7 ++ migrations/20250402221858_init_migrate.sql | 1 + run_migrations.txt | 3 + 
src/callers/common.rs | 26 +++++ src/callers/mod.rs | 3 +- src/lib.rs | 30 ++++++ src/main.rs | 81 +++++++++++++-- tests/auth_tests.rs | 113 ++++++++++++++++++++- 11 files changed, 272 insertions(+), 9 deletions(-) create mode 100644 .env.sample create mode 100644 migrations/20250402221858_init_migrate.sql create mode 100644 run_migrations.txt diff --git a/.env.sample b/.env.sample new file mode 100644 index 0000000..1625b7a --- /dev/null +++ b/.env.sample @@ -0,0 +1,2 @@ +DATABASE_URL=postgres://username:password@localhost/database_name +TEST_DATABASE_URL=postgres://username:password@localhost/database_name_test diff --git a/.gitea/workflows/workflow.yml b/.gitea/workflows/workflow.yml index d216362..bedf50d 100644 --- a/.gitea/workflows/workflow.yml +++ b/.gitea/workflows/workflow.yml @@ -37,6 +37,20 @@ jobs: - uses: actions-rust-lang/setup-rust-toolchain@v1 with: toolchain: 1.85.0 + # --- Add this step for explicit verification --- + - name: Verify Docker Environment + run: | + echo "Runner User Info:" + id + echo "Checking Docker Version:" + docker --version + echo "Checking Docker Daemon Status (info):" + docker info + echo "Checking Docker Daemon Status (ps):" + docker ps -a + echo "Docker environment check complete." + # NOTE: Do NOT use continue-on-error here. + # If Docker isn't working as expected, the job SHOULD fail here. - run: | mkdir -p ~/.ssh echo "${{ secrets.MYREPO_TOKEN }}" > ~/.ssh/gitlab_deploy_key diff --git a/.gitignore b/.gitignore index 96ef6c0..e551aa3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ /target Cargo.lock +.env diff --git a/Cargo.toml b/Cargo.toml index ce7a653..45a6f29 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,4 +11,11 @@ tokio = { version = "1.44.1", features = ["rt-multi-thread"] } tracing-subscriber = { version = "0.3.19" } tower = { version = "0.5.2" } hyper = { version = "1.6.0" } +sqlx = { version = "0.8.3", features = ["postgres", "runtime-tokio-native-tls"] } +dotenv = { version = "0.15" } icarus_models = { git = "ssh://git@git.kundeng.us/phoenix/icarus_models.git", tag = "v0.2.0" } + +[dev-dependencies] +http-body-util = "0.1.3" +reqwest = { version = "0.12.5", features = ["json"] } # For making HTTP requests in tests +once_cell = "1.19" # Useful for lazy initialization in tests/app setup diff --git a/migrations/20250402221858_init_migrate.sql b/migrations/20250402221858_init_migrate.sql new file mode 100644 index 0000000..8ddc1d3 --- /dev/null +++ b/migrations/20250402221858_init_migrate.sql @@ -0,0 +1 @@ +-- Add migration script here diff --git a/run_migrations.txt b/run_migrations.txt new file mode 100644 index 0000000..ae8892a --- /dev/null +++ b/run_migrations.txt @@ -0,0 +1,3 @@ +cargo install sqlx-cli +sqlx migrate add init_migration +sqlx migrate run diff --git a/src/callers/common.rs b/src/callers/common.rs index c39654b..afb5ffb 100644 --- a/src/callers/common.rs +++ b/src/callers/common.rs @@ -1,4 +1,30 @@ +use axum::{Extension, Json, http::StatusCode}; + +use serde::{Deserialize, Serialize}; + +#[derive(Deserialize, Serialize)] +pub struct TestResult { + message: String, +} + // basic handler that responds with a static string pub async fn root() -> &'static str { "Hello, World!" 
} + +pub async fn db_ping(Extension(pool): Extension) -> (StatusCode, Json) { + match sqlx::query("SELECT 1").execute(&pool).await { + Ok(_) => { + let tr = TestResult { + message: String::from("This works"), + }; + (StatusCode::OK, Json(tr)) + } + Err(e) => ( + StatusCode::BAD_REQUEST, + Json(TestResult { + message: e.to_string(), + }), + ), + } +} diff --git a/src/callers/mod.rs b/src/callers/mod.rs index 79b9089..33ddec1 100644 --- a/src/callers/mod.rs +++ b/src/callers/mod.rs @@ -3,5 +3,6 @@ pub mod register; pub mod endpoints { pub const ROOT: &str = "/"; - pub const REGISTER: &str = "api/v2/register"; + pub const REGISTER: &str = "/api/v2/register"; + pub const DBTEST: &str = "/api/v2/test/db"; } diff --git a/src/lib.rs b/src/lib.rs index 231cc1e..c3c562c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,33 @@ pub mod callers; pub mod config; pub mod models; + +mod keys { + pub const DBURL: &str = "DATABASE_URL"; + + pub mod error { + pub const ERROR: &str = "DATABASE_URL must be set in .env"; + } +} + +mod connection_settings { + pub const MAXCONN: u32 = 5; +} + +pub mod db_pool { + + use sqlx::postgres::PgPoolOptions; + use std::env; + + use crate::{connection_settings, keys}; + + pub async fn create_pool() -> Result { + dotenv::dotenv().ok(); + let database_url = env::var(keys::DBURL).expect(keys::error::ERROR); + + PgPoolOptions::new() + .max_connections(connection_settings::MAXCONN) + .connect(&database_url) + .await + } +} diff --git a/src/main.rs b/src/main.rs index 036d622..80e37ee 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,25 +2,92 @@ use axum::{ Router, routing::{get, post}, }; +// use std::net::SocketAddr; use icarus_auth::callers; use icarus_auth::config; +// use sqlx::Postgres; #[tokio::main] async fn main() { // initialize tracing tracing_subscriber::fmt::init(); - // build our application with a route - let app = Router::new() - .route(callers::endpoints::ROOT, get(callers::common::root)) - .route( - callers::endpoints::REGISTER, - post(callers::register::register_user), - ); + let app = app().await; // run our app with hyper, listening globally on port 3000 let url = config::get_full(); let listener = tokio::net::TcpListener::bind(url).await.unwrap(); axum::serve(listener, app).await.unwrap(); } + +async fn app() -> Router { + let pool = icarus_auth::db_pool::create_pool() + .await + .expect("Failed to create pool"); + + // build our application with a route + Router::new() + .route(callers::endpoints::DBTEST, get(callers::common::db_ping)) + .route(callers::endpoints::ROOT, get(callers::common::root)) + .route( + callers::endpoints::REGISTER, + post(callers::register::register_user), + ) + .layer(axum::Extension(pool)) +} + +#[cfg(test)] +mod tests { + use super::*; + use axum::{ + body::Body, + // extract::connect_info::MockConnectInfo, + http::{Request, StatusCode}, + }; + use http_body_util::BodyExt; + // use http_body_util::BodyExt; // for `collect` + // use serde_json::{Value, json}; + // use tokio::net::TcpListener; + // use tower::{Service, ServiceExt}; // for `call`, `oneshot`, and `ready` + use tower::ServiceExt; // for `call`, `oneshot`, and `ready` + + #[tokio::test] + async fn hello_world() { + let app = app().await; + + // `Router` implements `tower::Service>` so we can + // call it like any tower service, no need to run an HTTP server. 
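+ // `oneshot` comes from tower's `ServiceExt`: it consumes the router, waits for
+ // it to report readiness, and drives exactly one request through the service,
+ // returning the response. The request builder below defaults to the GET method,
+ // which is what the ROOT route is registered for.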
+ let response = app + .oneshot( + Request::builder() + .uri(callers::endpoints::ROOT) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), StatusCode::OK); + + /* + match response.into_body().collect().await { + Ok(o) => { + let parsed: String = match String::from_utf8(o.to_bytes()) { + Ok(s) => s, + Err(err) => { + String::new() + } + }; + } + Err(err) => { + assert!(false, + "Error: {:?}", err.to_string()); + } + } + */ + + let body = response.into_body().collect().await.unwrap().to_bytes(); + assert_eq!(&body[..], b"Hello, World!"); + } +} diff --git a/tests/auth_tests.rs b/tests/auth_tests.rs index 19e08fe..7268c22 100644 --- a/tests/auth_tests.rs +++ b/tests/auth_tests.rs @@ -1,5 +1,8 @@ extern crate icarus_auth; +use crate::icarus_auth::callers; + +// use axum::Extension; use axum::body::Body; // use axum::response::Response; use axum::{ @@ -7,13 +10,78 @@ use axum::{ http::{Request, StatusCode}, routing::get, }; +// use hyper::client::conn; +// use sqlx::PgPool; +// use sqlx::postgres::{self, PgPoolOptions}; +// use testcontainers_modules::testcontainers::runners::AsyncRunner; +// use hyper::client; +// use sqlx::postgres; // use http::{Request, StatusCode}; // use serde_json::json; // use tower::ServiceExt; // for `.oneshot()` use tower::util::ServiceExt; +// use testcontainers_modules::testcontainers::core::client:: -use crate::icarus_auth::callers; +const TEST_DATABASE_URL_ENV: &str = "TEST_DATABASE_URL"; +const DEFAULT_TEST_DATABASE_URL: &str = + "postgres://icarus_op_test:password@localhost:5432/icarus_auth_test"; +static SETUP: std::sync::Once = std::sync::Once::new(); + +// Ensure tracing is initialized only once for all tests in this file +/* +static TRACING_INIT: Lazy<()> = Lazy::new(|| { + if std::env::var("RUST_LOG").is_err() { + // Set default log level if not provided + unsafe { + std::env::set_var("RUST_LOG", "info,tower_http=debug,your_project_name=debug"); + } + } + tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .with_test_writer() // Write logs to the test output capture + .init(); +}); +*/ + +/* +async fn setup_database() -> sqlx::PgPool { + let database_url = std::env::var(TEST_DATABASE_URL_ENV) + .unwrap_or_else(|_| DEFAULT_TEST_DATABASE_URL.to_string()); + let pool = sqlx::PgPool::connect(&database_url) + .await + .expect("Failed to connect to test database"); + + let migrator = sqlx::migrate::Migrator::new(std::path::Path::new("./migrations")) + .await + .expect("Failed to create migrator"); + migrator.run(&pool).await.expect("Failed to run migrations"); + + // Seed here if needed + pool +} + */ + +/* +#[tokio::test] +async fn test_db_health() { + SETUP.call_once(|| { + tokio::runtime::Runtime::new().unwrap().block_on(async { + setup_database().await; + }); + }); +} +*/ + +/* +async fn setup_test(pool: sqlx::PgPool) -> Router { + Router::new() + .route(callers::endpoints::DBTEST, get(callers::common::db_ping)) + .layer(Extension(pool)) +} +*/ + +/* #[tokio::test] async fn test_hello_world() { let app = Router::new().route(callers::endpoints::ROOT, get(callers::common::root)); // Replace with your handler @@ -40,3 +108,46 @@ async fn test_hello_world() { assert_eq!(body, "Hello, World!"); } +*/ + +/* +#[tokio::test] +async fn _test_db_health_check() { + let container = testcontainers_modules::postgres::Postgres::default() + .start() + .await + .unwrap(); + let _host_ip = container.get_host().await.unwrap(); + let port = 5432; + let host_port = 
container.get_host_port_ipv4(port).await.unwrap(); + let conn_string = &format!( + "postgres://postgres:postgres@localhost:{}/postgres", + host_port + ); + + println!("Test Database: {}", conn_string); + + let app = Router::new().route(callers::endpoints::DBTEST, get(callers::common::db_ping)); // Replace with your handler + + let response = app + .oneshot( + Request::builder() + .uri(callers::endpoints::DBTEST) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), StatusCode::OK); + + match PgPoolOptions::new().connect(conn_string).await { + Ok(_) => { + assert!(true, "Success"); + } + Err(err) => { + assert!(false, "Error: {:?}", err.to_string()); + } + }; +} + */ From b6787de66bfddb7a8b0978aa6f13590b0cb0413b Mon Sep 17 00:00:00 2001 From: KD Date: Thu, 3 Apr 2025 10:07:58 -0400 Subject: [PATCH 04/10] Removing test file --- tests/auth_tests.rs | 153 -------------------------------------------- 1 file changed, 153 deletions(-) delete mode 100644 tests/auth_tests.rs diff --git a/tests/auth_tests.rs b/tests/auth_tests.rs deleted file mode 100644 index 7268c22..0000000 --- a/tests/auth_tests.rs +++ /dev/null @@ -1,153 +0,0 @@ -extern crate icarus_auth; - -use crate::icarus_auth::callers; - -// use axum::Extension; -use axum::body::Body; -// use axum::response::Response; -use axum::{ - Router, - http::{Request, StatusCode}, - routing::get, -}; -// use hyper::client::conn; -// use sqlx::PgPool; -// use sqlx::postgres::{self, PgPoolOptions}; -// use testcontainers_modules::testcontainers::runners::AsyncRunner; -// use hyper::client; -// use sqlx::postgres; -// use http::{Request, StatusCode}; -// use serde_json::json; -// use tower::ServiceExt; // for `.oneshot()` -use tower::util::ServiceExt; -// use testcontainers_modules::testcontainers::core::client:: - -const TEST_DATABASE_URL_ENV: &str = "TEST_DATABASE_URL"; -const DEFAULT_TEST_DATABASE_URL: &str = - "postgres://icarus_op_test:password@localhost:5432/icarus_auth_test"; - -static SETUP: std::sync::Once = std::sync::Once::new(); - -// Ensure tracing is initialized only once for all tests in this file -/* -static TRACING_INIT: Lazy<()> = Lazy::new(|| { - if std::env::var("RUST_LOG").is_err() { - // Set default log level if not provided - unsafe { - std::env::set_var("RUST_LOG", "info,tower_http=debug,your_project_name=debug"); - } - } - tracing_subscriber::fmt() - .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .with_test_writer() // Write logs to the test output capture - .init(); -}); -*/ - -/* -async fn setup_database() -> sqlx::PgPool { - let database_url = std::env::var(TEST_DATABASE_URL_ENV) - .unwrap_or_else(|_| DEFAULT_TEST_DATABASE_URL.to_string()); - let pool = sqlx::PgPool::connect(&database_url) - .await - .expect("Failed to connect to test database"); - - let migrator = sqlx::migrate::Migrator::new(std::path::Path::new("./migrations")) - .await - .expect("Failed to create migrator"); - migrator.run(&pool).await.expect("Failed to run migrations"); - - // Seed here if needed - pool -} - */ - -/* -#[tokio::test] -async fn test_db_health() { - SETUP.call_once(|| { - tokio::runtime::Runtime::new().unwrap().block_on(async { - setup_database().await; - }); - }); -} -*/ - -/* -async fn setup_test(pool: sqlx::PgPool) -> Router { - Router::new() - .route(callers::endpoints::DBTEST, get(callers::common::db_ping)) - .layer(Extension(pool)) -} -*/ - -/* -#[tokio::test] -async fn test_hello_world() { - let app = Router::new().route(callers::endpoints::ROOT, 
get(callers::common::root)); // Replace with your handler - - let response = app - .oneshot( - Request::builder() - .uri(callers::endpoints::ROOT) - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - - assert_eq!(response.status(), StatusCode::OK); - - let body = String::from_utf8( - axum::body::to_bytes(response.into_body(), usize::MAX) - .await - .unwrap() - .to_vec(), - ) - .unwrap(); - - assert_eq!(body, "Hello, World!"); -} -*/ - -/* -#[tokio::test] -async fn _test_db_health_check() { - let container = testcontainers_modules::postgres::Postgres::default() - .start() - .await - .unwrap(); - let _host_ip = container.get_host().await.unwrap(); - let port = 5432; - let host_port = container.get_host_port_ipv4(port).await.unwrap(); - let conn_string = &format!( - "postgres://postgres:postgres@localhost:{}/postgres", - host_port - ); - - println!("Test Database: {}", conn_string); - - let app = Router::new().route(callers::endpoints::DBTEST, get(callers::common::db_ping)); // Replace with your handler - - let response = app - .oneshot( - Request::builder() - .uri(callers::endpoints::DBTEST) - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - - assert_eq!(response.status(), StatusCode::OK); - - match PgPoolOptions::new().connect(conn_string).await { - Ok(_) => { - assert!(true, "Success"); - } - Err(err) => { - assert!(false, "Error: {:?}", err.to_string()); - } - }; -} - */ From 79f6ebdc099496401554aee8a2b9cb7540389422 Mon Sep 17 00:00:00 2001 From: KD Date: Thu, 3 Apr 2025 12:25:01 -0400 Subject: [PATCH 05/10] Moved router code to its own function --- src/main.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/main.rs b/src/main.rs index 80e37ee..3e254b3 100644 --- a/src/main.rs +++ b/src/main.rs @@ -21,11 +21,7 @@ async fn main() { axum::serve(listener, app).await.unwrap(); } -async fn app() -> Router { - let pool = icarus_auth::db_pool::create_pool() - .await - .expect("Failed to create pool"); - +async fn routes() -> Router { // build our application with a route Router::new() .route(callers::endpoints::DBTEST, get(callers::common::db_ping)) @@ -34,7 +30,14 @@ async fn app() -> Router { callers::endpoints::REGISTER, post(callers::register::register_user), ) - .layer(axum::Extension(pool)) +} + +async fn app() -> Router { + let pool = icarus_auth::db_pool::create_pool() + .await + .expect("Failed to create pool"); + + routes().await.layer(axum::Extension(pool)) } #[cfg(test)] From 7e189e84d8f9f1a7d8a03e410a14dc51a7084c07 Mon Sep 17 00:00:00 2001 From: KD Date: Thu, 3 Apr 2025 16:26:36 +0000 Subject: [PATCH 06/10] Adding code to use test database when in debug mode (#10) Reviewed-on: https://git.kundeng.us/phoenix/icarus_auth/pulls/10 Co-authored-by: KD Co-committed-by: KD --- .gitea/workflows/workflow.yml | 45 ++++++++++++++++++++++++++++++++++- Cargo.toml | 2 +- src/lib.rs | 22 +++++++++++++++-- 3 files changed, 65 insertions(+), 4 deletions(-) diff --git a/.gitea/workflows/workflow.yml b/.gitea/workflows/workflow.yml index bedf50d..bf47367 100644 --- a/.gitea/workflows/workflow.yml +++ b/.gitea/workflows/workflow.yml @@ -32,6 +32,22 @@ jobs: test: name: Test Suite runs-on: ubuntu-24.04 + # --- Add database service definition --- + services: + postgres: + image: postgres:17.4 # Or pin to a more specific version like 14.9 + env: + # Use secrets for DB init, with fallbacks for flexibility + POSTGRES_USER: ${{ secrets.DB_TEST_USER || 'testuser' }} + POSTGRES_PASSWORD: ${{ secrets.DB_TEST_PASSWORD || 'testpassword' }} + 
POSTGRES_DB: ${{ secrets.DB_TEST_NAME || 'testdb' }} + # Options to wait until the database is ready + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 @@ -51,7 +67,34 @@ jobs: echo "Docker environment check complete." # NOTE: Do NOT use continue-on-error here. # If Docker isn't working as expected, the job SHOULD fail here. - - run: | + # --- Optional but Recommended: Database Migrations Step --- + - name: Run Database Migrations + env: + # Define TEST_DATABASE_URL using service details and secrets + TEST_DATABASE_URL: postgresql://${{ secrets.DB_TEST_USER || 'testuser' }}:${{ secrets.DB_TEST_PASSWORD || 'testpassword' }}@postgres:5432/${{ secrets.DB_TEST_NAME || 'testdb' }} + # Make SSH agent available if migrations fetch private dependencies + SSH_AUTH_SOCK: ${{ env.SSH_AUTH_SOCK }} + run: | + echo "Running database migrations..." + # ===> IMPORTANT: Replace placeholder below with your actual migration command <=== + # Example: Install and run sqlx-cli + # cargo install sqlx-cli --no-default-features --features native-tls,postgres + # sqlx database setup --database-url $TEST_DATABASE_URL + + # Example: Install and run diesel_cli + # cargo install diesel_cli --no-default-features --features postgres + # diesel migration run --database-url $TEST_DATABASE_URL + + # echo "[Placeholder] Your migration command goes here." + # ===> End of Placeholder <=== + - name: Run tests + env: + # Define TEST_DATABASE_URL for tests to use + TEST_DATABASE_URL: postgresql://${{ secrets.DB_TEST_USER || 'testuser' }}:${{ secrets.DB_TEST_PASSWORD || 'testpassword' }}@postgres:5432/${{ secrets.DB_TEST_NAME || 'testdb' }} + RUST_LOG: info # Optional: configure test log level + # Make SSH agent available if tests fetch private dependencies + SSH_AUTH_SOCK: ${{ env.SSH_AUTH_SOCK }} + run: | mkdir -p ~/.ssh echo "${{ secrets.MYREPO_TOKEN }}" > ~/.ssh/gitlab_deploy_key chmod 600 ~/.ssh/gitlab_deploy_key diff --git a/Cargo.toml b/Cargo.toml index 45a6f29..6567308 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,7 @@ tracing-subscriber = { version = "0.3.19" } tower = { version = "0.5.2" } hyper = { version = "1.6.0" } sqlx = { version = "0.8.3", features = ["postgres", "runtime-tokio-native-tls"] } -dotenv = { version = "0.15" } +dotenvy = { version = "0.15.7" } icarus_models = { git = "ssh://git@git.kundeng.us/phoenix/icarus_models.git", tag = "v0.2.0" } [dev-dependencies] diff --git a/src/lib.rs b/src/lib.rs index c3c562c..641a39d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -8,6 +8,13 @@ mod keys { pub mod error { pub const ERROR: &str = "DATABASE_URL must be set in .env"; } + + pub mod test { + pub const DBURL: &str = "TEST_DATABASE_URL"; + pub mod error { + pub const ERROR: &str = "TEST_DATABASE_URL must be set in .env"; + } + } } mod connection_settings { @@ -22,12 +29,23 @@ pub mod db_pool { use crate::{connection_settings, keys}; pub async fn create_pool() -> Result { - dotenv::dotenv().ok(); - let database_url = env::var(keys::DBURL).expect(keys::error::ERROR); + let database_url = get_db_url().await; + println!("Database url: {:?}", database_url); PgPoolOptions::new() .max_connections(connection_settings::MAXCONN) .connect(&database_url) .await } + + async fn get_db_url() -> String { + #[cfg(debug_assertions)] // Example: Only load .env in debug builds + dotenvy::dotenv().ok(); + + if cfg!(debug_assertions) { + 
env::var(keys::test::DBURL).expect(keys::test::error::ERROR) + } else { + env::var(keys::DBURL).expect(keys::error::ERROR) + } + } } From 9be38542c1baf99b47af22c0aa0cce2e82a1d1d0 Mon Sep 17 00:00:00 2001 From: KD Date: Thu, 3 Apr 2025 12:27:48 -0400 Subject: [PATCH 07/10] Version bump --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 6567308..fac1e92 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "icarus_auth" -version = "0.1.0" +version = "0.1.1" edition = "2024" [dependencies] From 238fb15e6d83d6adb2a729de3adc4f046d785b7a Mon Sep 17 00:00:00 2001 From: KD Date: Thu, 3 Apr 2025 12:40:29 -0400 Subject: [PATCH 08/10] Updated ssh key --- .gitea/workflows/workflow.yml | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/.gitea/workflows/workflow.yml b/.gitea/workflows/workflow.yml index bf47367..acfa95b 100644 --- a/.gitea/workflows/workflow.yml +++ b/.gitea/workflows/workflow.yml @@ -21,12 +21,12 @@ jobs: toolchain: 1.85.0 - run: | mkdir -p ~/.ssh - echo "${{ secrets.MYREPO_TOKEN }}" > ~/.ssh/gitlab_deploy_key - chmod 600 ~/.ssh/gitlab_deploy_key + echo "${{ secrets.MYREPO_TOKEN }}" > ~/.ssh/icarus_models_deploy_key + chmod 600 ~/.ssh/icarus_models_deploy_key ssh-keyscan ${{ vars.MYHOST }} >> ~/.ssh/known_hosts eval $(ssh-agent -s) - ssh-add -v ~/.ssh/gitlab_deploy_key + ssh-add -v ~/.ssh/icarus_models_deploy_key cargo check test: @@ -96,12 +96,12 @@ jobs: SSH_AUTH_SOCK: ${{ env.SSH_AUTH_SOCK }} run: | mkdir -p ~/.ssh - echo "${{ secrets.MYREPO_TOKEN }}" > ~/.ssh/gitlab_deploy_key - chmod 600 ~/.ssh/gitlab_deploy_key + echo "${{ secrets.MYREPO_TOKEN }}" > ~/.ssh/icarus_models_deploy_key + chmod 600 ~/.ssh/icarus_models_deploy_key ssh-keyscan ${{ vars.MYHOST }} >> ~/.ssh/known_hosts eval $(ssh-agent -s) - ssh-add -v ~/.ssh/gitlab_deploy_key + ssh-add -v ~/.ssh/icarus_models_deploy_key cargo test fmt: @@ -115,12 +115,12 @@ jobs: - run: rustup component add rustfmt - run: | mkdir -p ~/.ssh - echo "${{ secrets.MYREPO_TOKEN }}" > ~/.ssh/gitlab_deploy_key - chmod 600 ~/.ssh/gitlab_deploy_key + echo "${{ secrets.MYREPO_TOKEN }}" > ~/.ssh/icarus_models_deploy_key + chmod 600 ~/.ssh/icarus_models_deploy_key ssh-keyscan ${{ vars.MYHOST }} >> ~/.ssh/known_hosts eval $(ssh-agent -s) - ssh-add -v ~/.ssh/gitlab_deploy_key + ssh-add -v ~/.ssh/icarus_models_deploy_key cargo fmt --all -- --check clippy: @@ -134,12 +134,12 @@ jobs: - run: rustup component add clippy - run: | mkdir -p ~/.ssh - echo "${{ secrets.MYREPO_TOKEN }}" > ~/.ssh/gitlab_deploy_key - chmod 600 ~/.ssh/gitlab_deploy_key + echo "${{ secrets.MYREPO_TOKEN }}" > ~/.ssh/icarus_models_deploy_key + chmod 600 ~/.ssh/icarus_models_deploy_key ssh-keyscan ${{ vars.MYHOST }} >> ~/.ssh/known_hosts eval $(ssh-agent -s) - ssh-add -v ~/.ssh/gitlab_deploy_key + ssh-add -v ~/.ssh/icarus_models_deploy_key cargo clippy -- -D warnings build: @@ -152,11 +152,11 @@ jobs: toolchain: 1.85.0 - run: | mkdir -p ~/.ssh - echo "${{ secrets.MYREPO_TOKEN }}" > ~/.ssh/gitlab_deploy_key - chmod 600 ~/.ssh/gitlab_deploy_key + echo "${{ secrets.MYREPO_TOKEN }}" > ~/.ssh/icarus_models_deploy_key + chmod 600 ~/.ssh/icarus_models_deploy_key ssh-keyscan ${{ vars.MYHOST }} >> ~/.ssh/known_hosts eval $(ssh-agent -s) - ssh-add -v ~/.ssh/gitlab_deploy_key + ssh-add -v ~/.ssh/icarus_models_deploy_key cargo build --release From d7c3443022ab3f3d62fe4e8eaa66a521074c4e98 Mon Sep 17 00:00:00 2001 From: phoenix Date: Sat, 5 Apr 2025 01:30:35 +0000 Subject: 
[PATCH 09/10] dynamic_db (#17) Reviewed-on: https://git.kundeng.us/phoenix/icarus_auth/pulls/17 Co-authored-by: phoenix Co-committed-by: phoenix --- .env.sample | 1 - .gitea/workflows/workflow.yml | 26 +-- Cargo.toml | 12 +- migrations/20250402221858_init_migrate.sql | 8 + run_migrations.txt | 22 ++ src/callers/register.rs | 41 +++- src/hashing/mod.rs | 73 +++++++ src/lib.rs | 17 +- src/main.rs | 231 +++++++++++++++++---- src/models/common.rs | 7 +- src/repo/mod.rs | 20 ++ 11 files changed, 367 insertions(+), 91 deletions(-) create mode 100644 src/hashing/mod.rs create mode 100644 src/repo/mod.rs diff --git a/.env.sample b/.env.sample index 1625b7a..135f9aa 100644 --- a/.env.sample +++ b/.env.sample @@ -1,2 +1 @@ DATABASE_URL=postgres://username:password@localhost/database_name -TEST_DATABASE_URL=postgres://username:password@localhost/database_name_test diff --git a/.gitea/workflows/workflow.yml b/.gitea/workflows/workflow.yml index acfa95b..087d74e 100644 --- a/.gitea/workflows/workflow.yml +++ b/.gitea/workflows/workflow.yml @@ -27,6 +27,7 @@ jobs: eval $(ssh-agent -s) ssh-add -v ~/.ssh/icarus_models_deploy_key + cargo check test: @@ -68,29 +69,10 @@ jobs: # NOTE: Do NOT use continue-on-error here. # If Docker isn't working as expected, the job SHOULD fail here. # --- Optional but Recommended: Database Migrations Step --- - - name: Run Database Migrations - env: - # Define TEST_DATABASE_URL using service details and secrets - TEST_DATABASE_URL: postgresql://${{ secrets.DB_TEST_USER || 'testuser' }}:${{ secrets.DB_TEST_PASSWORD || 'testpassword' }}@postgres:5432/${{ secrets.DB_TEST_NAME || 'testdb' }} - # Make SSH agent available if migrations fetch private dependencies - SSH_AUTH_SOCK: ${{ env.SSH_AUTH_SOCK }} - run: | - echo "Running database migrations..." - # ===> IMPORTANT: Replace placeholder below with your actual migration command <=== - # Example: Install and run sqlx-cli - # cargo install sqlx-cli --no-default-features --features native-tls,postgres - # sqlx database setup --database-url $TEST_DATABASE_URL - - # Example: Install and run diesel_cli - # cargo install diesel_cli --no-default-features --features postgres - # diesel migration run --database-url $TEST_DATABASE_URL - - # echo "[Placeholder] Your migration command goes here." 
- # ===> End of Placeholder <=== - name: Run tests env: - # Define TEST_DATABASE_URL for tests to use - TEST_DATABASE_URL: postgresql://${{ secrets.DB_TEST_USER || 'testuser' }}:${{ secrets.DB_TEST_PASSWORD || 'testpassword' }}@postgres:5432/${{ secrets.DB_TEST_NAME || 'testdb' }} + # Define DATABASE_URL for tests to use + DATABASE_URL: postgresql://${{ secrets.DB_TEST_USER || 'testuser' }}:${{ secrets.DB_TEST_PASSWORD || 'testpassword' }}@postgres:5432/${{ secrets.DB_TEST_NAME || 'testdb' }} RUST_LOG: info # Optional: configure test log level # Make SSH agent available if tests fetch private dependencies SSH_AUTH_SOCK: ${{ env.SSH_AUTH_SOCK }} @@ -102,6 +84,7 @@ jobs: eval $(ssh-agent -s) ssh-add -v ~/.ssh/icarus_models_deploy_key + cargo test fmt: @@ -159,4 +142,3 @@ jobs: eval $(ssh-agent -s) ssh-add -v ~/.ssh/icarus_models_deploy_key cargo build --release - diff --git a/Cargo.toml b/Cargo.toml index fac1e92..7cbd4f0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,11 +11,15 @@ tokio = { version = "1.44.1", features = ["rt-multi-thread"] } tracing-subscriber = { version = "0.3.19" } tower = { version = "0.5.2" } hyper = { version = "1.6.0" } -sqlx = { version = "0.8.3", features = ["postgres", "runtime-tokio-native-tls"] } +sqlx = { version = "0.8.3", features = ["postgres", "runtime-tokio-native-tls", "uuid"] } dotenvy = { version = "0.15.7" } -icarus_models = { git = "ssh://git@git.kundeng.us/phoenix/icarus_models.git", tag = "v0.2.0" } +uuid = { version = "1.16.0", features = ["v4", "serde"] } +argon2 = { version = "0.5.3", features = ["std"] } # Use the latest 0.5.x version +rand = { version = "0.9" } +icarus_models = { git = "ssh://git@git.kundeng.us/phoenix/icarus_models.git", tag = "v0.3.0" } [dev-dependencies] -http-body-util = "0.1.3" +http-body-util = { version = "0.1.3" } +url = { version = "2.5" } reqwest = { version = "0.12.5", features = ["json"] } # For making HTTP requests in tests -once_cell = "1.19" # Useful for lazy initialization in tests/app setup +once_cell = { version = "1.19" } # Useful for lazy initialization in tests/app setup diff --git a/migrations/20250402221858_init_migrate.sql b/migrations/20250402221858_init_migrate.sql index 8ddc1d3..16796d7 100644 --- a/migrations/20250402221858_init_migrate.sql +++ b/migrations/20250402221858_init_migrate.sql @@ -1 +1,9 @@ -- Add migration script here +CREATE EXTENSION IF NOT EXISTS pgcrypto; + +CREATE TABLE IF NOT EXISTS "user" ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + username TEXT NOT NULL, + password TEXT NOT NULL, + date_created TIMESTAMPTZ NOT NULL DEFAULT NOW() +); diff --git a/run_migrations.txt b/run_migrations.txt index ae8892a..927b280 100644 --- a/run_migrations.txt +++ b/run_migrations.txt @@ -1,3 +1,25 @@ +# Make sure role has CREATEDB +ALTER ROLE username_that_needs_permission CREATEDB; + +# Install migrations cargo install sqlx-cli + +# Make sure to populate DATABASE_URL with correct value. 
+# By default, the DATABASE_URL found in .env file will be used +export DATABASE_URL="postgres://icarus_op_test:password@localhost/icarus_auth_test" + +# init sqlx migrate add init_migration sqlx migrate run + +# Create +sqlx database create + +# Drop +sqlx database drop + +# setup +sqlx database setup + +# Reset +sqlx database reset diff --git a/src/callers/register.rs b/src/callers/register.rs index 7a1299a..1462583 100644 --- a/src/callers/register.rs +++ b/src/callers/register.rs @@ -1,12 +1,47 @@ use axum::{Json, http::StatusCode}; use crate::models; +use crate::repo; + +mod response { + use serde::{Deserialize, Serialize}; + + use crate::models; + + #[derive(Deserialize, Serialize)] + pub struct Response { + pub message: String, + pub data: models::common::User, + } +} pub async fn register_user( + axum::Extension(pool): axum::Extension, Json(payload): Json, -) -> (StatusCode, Json) { - let user = models::common::User { +) -> (StatusCode, Json) { + let mut user = models::common::User { + id: uuid::Uuid::nil(), username: payload.username.clone(), + password: payload.password.clone(), }; - (StatusCode::CREATED, Json(user)) + + match repo::user::insert(&pool, &user).await { + Ok(id) => { + user.id = id; + ( + StatusCode::CREATED, + Json(response::Response { + message: String::from("User inserted"), + data: user, + }), + ) + } + Err(err) => ( + StatusCode::BAD_REQUEST, + Json(response::Response { + message: err.to_string(), + data: user, + }), + ), + } } diff --git a/src/hashing/mod.rs b/src/hashing/mod.rs new file mode 100644 index 0000000..eb7ed39 --- /dev/null +++ b/src/hashing/mod.rs @@ -0,0 +1,73 @@ +use argon2::{ + Argon2, // The Argon2 algorithm struct + PasswordVerifier, + password_hash::{ + PasswordHasher, + SaltString, + rand_core::OsRng, // Secure random number generator + }, +}; + +pub fn hash_password(password: &String) -> Result { + let password_bytes = password.as_bytes(); + + // Generate a random salt + // SaltString::generate uses OsRng internally for cryptographic security + let salt = SaltString::generate(&mut OsRng); + + // Create an Argon2 instance with default parameters (recommended) + // You could customize parameters here if needed, but defaults are strong + let argon2 = Argon2::default(); + + // Hash the password with the salt + // The output is a PasswordHash string format that includes algorithm, version, + // parameters, salt, and the hash itself. 
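+ // With Argon2::default() the result is a PHC-formatted string along the lines of
+ // "$argon2id$v=19$m=...,t=...,p=...$<salt>$<hash>" (shape shown for illustration only),
+ // so verify_password can later recover the salt and parameters from the stored hash.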
+ let password_hash = argon2.hash_password(password_bytes, &salt)?.to_string(); + + Ok(password_hash) +} + +pub fn verify_password( + password_attempt: &String, + stored_hash: String, +) -> Result { + let password_bytes = password_attempt.as_bytes(); + + // Parse the stored hash string + // This extracts the salt, parameters, and hash digest + let parsed_hash = argon2::PasswordHash::new(stored_hash.as_str())?; + + // Create an Argon2 instance (it will use the parameters from the parsed hash) + let argon2 = Argon2::default(); + + // Verify the password against the parsed hash + // This automatically uses the correct salt and parameters embedded in `parsed_hash` + match argon2.verify_password(password_bytes, &parsed_hash) { + Ok(()) => Ok(true), // Passwords match + Err(argon2::password_hash::Error::Password) => Ok(false), // Passwords don't match + Err(e) => Err(e), // Some other error occurred (e.g., invalid hash format) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_hash_password() { + let some_password = String::from("somethingrandom"); + match hash_password(&some_password) { + Ok(p) => match verify_password(&some_password, p.clone()) { + Ok(res) => { + assert_eq!(res, true); + } + Err(err) => { + assert!(false, "Error: {:?}", err.to_string()); + } + }, + Err(eerr) => { + assert!(false, "Error: {:?}", eerr.to_string()); + } + } + } +} diff --git a/src/lib.rs b/src/lib.rs index 641a39d..891aac4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,20 +1,15 @@ pub mod callers; pub mod config; +pub mod hashing; pub mod models; +pub mod repo; -mod keys { +pub mod keys { pub const DBURL: &str = "DATABASE_URL"; pub mod error { pub const ERROR: &str = "DATABASE_URL must be set in .env"; } - - pub mod test { - pub const DBURL: &str = "TEST_DATABASE_URL"; - pub mod error { - pub const ERROR: &str = "TEST_DATABASE_URL must be set in .env"; - } - } } mod connection_settings { @@ -42,10 +37,6 @@ pub mod db_pool { #[cfg(debug_assertions)] // Example: Only load .env in debug builds dotenvy::dotenv().ok(); - if cfg!(debug_assertions) { - env::var(keys::test::DBURL).expect(keys::test::error::ERROR) - } else { - env::var(keys::DBURL).expect(keys::error::ERROR) - } + env::var(keys::DBURL).expect(keys::error::ERROR) } } diff --git a/src/main.rs b/src/main.rs index 3e254b3..dfe28f5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,19 +1,12 @@ -use axum::{ - Router, - routing::{get, post}, -}; -// use std::net::SocketAddr; - use icarus_auth::callers; use icarus_auth::config; -// use sqlx::Postgres; #[tokio::main] async fn main() { // initialize tracing tracing_subscriber::fmt::init(); - let app = app().await; + let app = init::app().await; // run our app with hyper, listening globally on port 3000 let url = config::get_full(); @@ -21,43 +14,143 @@ async fn main() { axum::serve(listener, app).await.unwrap(); } -async fn routes() -> Router { - // build our application with a route - Router::new() - .route(callers::endpoints::DBTEST, get(callers::common::db_ping)) - .route(callers::endpoints::ROOT, get(callers::common::root)) - .route( - callers::endpoints::REGISTER, - post(callers::register::register_user), - ) +mod db { + pub async fn migrations(pool: &sqlx::PgPool) { + // Run migrations using the sqlx::migrate! 
macro + // Assumes your migrations are in a ./migrations folder relative to Cargo.toml + sqlx::migrate!("./migrations") + .run(pool) + .await + .expect("Failed to run migrations on testcontainer DB"); + } } -async fn app() -> Router { - let pool = icarus_auth::db_pool::create_pool() - .await - .expect("Failed to create pool"); +mod init { + use axum::{ + Router, + routing::{get, post}, + }; - routes().await.layer(axum::Extension(pool)) + use crate::callers; + use crate::db; + + pub async fn routes() -> Router { + // build our application with a route + Router::new() + .route(callers::endpoints::DBTEST, get(callers::common::db_ping)) + .route(callers::endpoints::ROOT, get(callers::common::root)) + .route( + callers::endpoints::REGISTER, + post(callers::register::register_user), + ) + } + + pub async fn app() -> Router { + let pool = icarus_auth::db_pool::create_pool() + .await + .expect("Failed to create pool"); + + db::migrations(&pool).await; + + routes().await.layer(axum::Extension(pool)) + } } #[cfg(test)] mod tests { use super::*; + use axum::{ body::Body, - // extract::connect_info::MockConnectInfo, http::{Request, StatusCode}, }; use http_body_util::BodyExt; - // use http_body_util::BodyExt; // for `collect` - // use serde_json::{Value, json}; - // use tokio::net::TcpListener; - // use tower::{Service, ServiceExt}; // for `call`, `oneshot`, and `ready` + use serde::{Deserialize, Serialize}; + use serde_json::json; use tower::ServiceExt; // for `call`, `oneshot`, and `ready` + #[derive(Deserialize, Serialize)] + struct Response { + pub message: String, + pub data: icarus_auth::models::common::User, + } + + mod db_mgr { + use std::str::FromStr; + + use icarus_auth::keys; + + pub const LIMIT: usize = 6; + + pub async fn get_pool() -> Result { + let tm_db_url = std::env::var(keys::DBURL).expect("DATABASE_URL must be present"); + let tm_options = sqlx::postgres::PgConnectOptions::from_str(&tm_db_url).unwrap(); + sqlx::PgPool::connect_with(tm_options).await + } + + pub async fn generate_db_name() -> String { + let db_name = + get_database_name().unwrap() + &"_" + &uuid::Uuid::new_v4().to_string()[..LIMIT]; + db_name + } + + pub async fn connect_to_db(db_name: &str) -> Result { + let db_url = std::env::var(keys::DBURL).expect("DATABASE_URL must be set for tests"); + let options = sqlx::postgres::PgConnectOptions::from_str(&db_url)?.database(db_name); + sqlx::PgPool::connect_with(options).await + } + + pub async fn create_database( + template_pool: &sqlx::PgPool, + db_name: &str, + ) -> Result<(), sqlx::Error> { + let create_query = format!("CREATE DATABASE {}", db_name); + match sqlx::query(&create_query).execute(template_pool).await { + Ok(_) => Ok(()), + Err(e) => Err(e), + } + } + + // Function to drop a database + pub async fn drop_database( + template_pool: &sqlx::PgPool, + db_name: &str, + ) -> Result<(), sqlx::Error> { + let drop_query = format!("DROP DATABASE IF EXISTS {} WITH (FORCE)", db_name); + sqlx::query(&drop_query).execute(template_pool).await?; + Ok(()) + } + + pub fn get_database_name() -> Result> { + dotenvy::dotenv().ok(); // Load .env file if it exists + + match std::env::var(keys::DBURL) { + Ok(database_url) => { + let parsed_url = url::Url::parse(&database_url)?; + if parsed_url.scheme() == "postgres" || parsed_url.scheme() == "postgresql" { + match parsed_url + .path_segments() + .and_then(|segments| segments.last().map(|s| s.to_string())) + { + Some(sss) => Ok(sss), + None => Err("Error parsing".into()), + } + } else { + // Handle other database types if needed + 
Err("Error parsing".into()) + } + } + Err(_) => { + // DATABASE_URL environment variable not found + Err("Error parsing".into()) + } + } + } + } + #[tokio::test] - async fn hello_world() { - let app = app().await; + async fn test_hello_world() { + let app = init::app().await; // `Router` implements `tower::Service>` so we can // call it like any tower service, no need to run an HTTP server. @@ -73,24 +166,70 @@ mod tests { assert_eq!(response.status(), StatusCode::OK); - /* - match response.into_body().collect().await { - Ok(o) => { - let parsed: String = match String::from_utf8(o.to_bytes()) { - Ok(s) => s, - Err(err) => { - String::new() - } - }; - } - Err(err) => { - assert!(false, - "Error: {:?}", err.to_string()); - } - } - */ - let body = response.into_body().collect().await.unwrap().to_bytes(); assert_eq!(&body[..], b"Hello, World!"); } + + #[tokio::test] + async fn test_register_user() { + let tm_pool = db_mgr::get_pool().await.unwrap(); + + let db_name = db_mgr::generate_db_name().await; + + match db_mgr::create_database(&tm_pool, &db_name).await { + Ok(_) => { + println!("Success"); + } + Err(e) => { + assert!(false, "Error: {:?}", e.to_string()); + } + } + + let pool = db_mgr::connect_to_db(&db_name).await.unwrap(); + + db::migrations(&pool).await; + + let app = init::routes().await.layer(axum::Extension(pool)); + + let usr = icarus_auth::models::common::CreateUser { + username: String::from("somethingsss"), + password: String::from("Raindown!"), + }; + + let payload = json!({ + "username": &usr.username, + "password": &usr.password, + }); + + let response = app + .oneshot( + Request::builder() + .method(axum::http::Method::POST) + .uri(callers::endpoints::REGISTER) + .header(axum::http::header::CONTENT_TYPE, "application/json") + .body(Body::from(payload.to_string())) + .unwrap(), + ) + .await; + + match response { + Ok(resp) => { + assert_eq!(resp.status(), StatusCode::CREATED, "Message: {:?}", resp); + let body = axum::body::to_bytes(resp.into_body(), usize::MAX) + .await + .unwrap(); + let parsed_body: Response = serde_json::from_slice(&body).unwrap(); + + assert_eq!( + usr.username, parsed_body.data.username, + "Usernames do not match" + ); + } + Err(err) => { + assert!(false, "Error: {:?}", err.to_string()); + } + }; + + let _ = db_mgr::drop_database(&tm_pool, &db_name).await; + } } diff --git a/src/models/common.rs b/src/models/common.rs index cda15f2..7b978d5 100644 --- a/src/models/common.rs +++ b/src/models/common.rs @@ -1,11 +1,14 @@ use serde::{Deserialize, Serialize}; -#[derive(Deserialize)] +#[derive(Deserialize, Serialize)] pub struct CreateUser { pub username: String, + pub password: String, } -#[derive(Serialize)] +#[derive(Deserialize, Serialize)] pub struct User { + pub id: uuid::Uuid, pub username: String, + pub password: String, } diff --git a/src/repo/mod.rs b/src/repo/mod.rs new file mode 100644 index 0000000..95d3c7a --- /dev/null +++ b/src/repo/mod.rs @@ -0,0 +1,20 @@ +pub mod user { + use crate::models; + + pub async fn insert( + pool: &sqlx::PgPool, + user: &models::common::User, + ) -> Result { + let insert_sql = "INSERT INTO \"user\" (username, password) VALUES ($1, $2) RETURNING id"; + + match sqlx::query_scalar(insert_sql) + .bind(&user.username) // Bind the input message securely + .bind(&user.password) + .fetch_one(pool) // Execute and expect exactly ONE row with ONE column back + .await + { + Ok(o) => Ok(o), + Err(err) => Err(err), // _ => uuid::Uuid::nil(), + } + } +} From 7f5f1bae2f917bce3c911d3b41209b3764f3c3ce Mon Sep 17 00:00:00 2001 From: 
phoenix Date: Sat, 5 Apr 2025 19:26:58 +0000 Subject: [PATCH 10/10] Register endpoint (#16) Reviewed-on: https://git.kundeng.us/phoenix/icarus_auth/pulls/16 Co-authored-by: phoenix Co-committed-by: phoenix --- .gitea/workflows/tag_release.yml | 2 +- .gitea/workflows/workflow.yml | 11 +-- Cargo.toml | 8 +- migrations/20250402221858_init_migrate.sql | 15 ++- src/callers/register.rs | 92 ++++++++++++++---- src/hashing/mod.rs | 17 +++- src/lib.rs | 1 - src/main.rs | 34 ++++--- src/models/common.rs | 14 --- src/models/mod.rs | 1 - src/repo/mod.rs | 106 ++++++++++++++++++--- 11 files changed, 230 insertions(+), 71 deletions(-) delete mode 100644 src/models/common.rs delete mode 100644 src/models/mod.rs diff --git a/.gitea/workflows/tag_release.yml b/.gitea/workflows/tag_release.yml index 0ef5ff7..ca7ddef 100644 --- a/.gitea/workflows/tag_release.yml +++ b/.gitea/workflows/tag_release.yml @@ -19,7 +19,7 @@ jobs: - name: Install Rust uses: actions-rs/toolchain@v1 with: - toolchain: 1.85.0 + toolchain: 1.86.0 components: cargo - name: Extract Version from Cargo.toml diff --git a/.gitea/workflows/workflow.yml b/.gitea/workflows/workflow.yml index 087d74e..557a245 100644 --- a/.gitea/workflows/workflow.yml +++ b/.gitea/workflows/workflow.yml @@ -18,7 +18,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: - toolchain: 1.85.0 + toolchain: 1.86.0 - run: | mkdir -p ~/.ssh echo "${{ secrets.MYREPO_TOKEN }}" > ~/.ssh/icarus_models_deploy_key @@ -53,7 +53,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: - toolchain: 1.85.0 + toolchain: 1.86.0 # --- Add this step for explicit verification --- - name: Verify Docker Environment run: | @@ -68,7 +68,6 @@ jobs: echo "Docker environment check complete." # NOTE: Do NOT use continue-on-error here. # If Docker isn't working as expected, the job SHOULD fail here. 
- # --- Optional but Recommended: Database Migrations Step --- - name: Run tests env: # Define DATABASE_URL for tests to use @@ -94,7 +93,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: - toolchain: 1.85.0 + toolchain: 1.86.0 - run: rustup component add rustfmt - run: | mkdir -p ~/.ssh @@ -113,7 +112,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: - toolchain: 1.85.0 + toolchain: 1.86.0 - run: rustup component add clippy - run: | mkdir -p ~/.ssh @@ -132,7 +131,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: - toolchain: 1.85.0 + toolchain: 1.86.0 - run: | mkdir -p ~/.ssh echo "${{ secrets.MYREPO_TOKEN }}" > ~/.ssh/icarus_models_deploy_key diff --git a/Cargo.toml b/Cargo.toml index 7cbd4f0..59fd2d9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,8 @@ [package] name = "icarus_auth" -version = "0.1.1" +version = "0.2.0" edition = "2024" +rust-version = "1.86" [dependencies] axum = { version = "0.8.3" } @@ -11,12 +12,13 @@ tokio = { version = "1.44.1", features = ["rt-multi-thread"] } tracing-subscriber = { version = "0.3.19" } tower = { version = "0.5.2" } hyper = { version = "1.6.0" } -sqlx = { version = "0.8.3", features = ["postgres", "runtime-tokio-native-tls", "uuid"] } +sqlx = { version = "0.8.3", features = ["postgres", "runtime-tokio-native-tls", "time", "uuid"] } dotenvy = { version = "0.15.7" } uuid = { version = "1.16.0", features = ["v4", "serde"] } argon2 = { version = "0.5.3", features = ["std"] } # Use the latest 0.5.x version rand = { version = "0.9" } -icarus_models = { git = "ssh://git@git.kundeng.us/phoenix/icarus_models.git", tag = "v0.3.0" } +time = { version = "0.3.41", features = ["macros", "serde"] } +icarus_models = { git = "ssh://git@git.kundeng.us/phoenix/icarus_models.git", tag = "v0.4.0" } [dev-dependencies] http-body-util = { version = "0.1.3" } diff --git a/migrations/20250402221858_init_migrate.sql b/migrations/20250402221858_init_migrate.sql index 16796d7..8fe068c 100644 --- a/migrations/20250402221858_init_migrate.sql +++ b/migrations/20250402221858_init_migrate.sql @@ -5,5 +5,18 @@ CREATE TABLE IF NOT EXISTS "user" ( id UUID PRIMARY KEY DEFAULT gen_random_uuid(), username TEXT NOT NULL, password TEXT NOT NULL, - date_created TIMESTAMPTZ NOT NULL DEFAULT NOW() + email TEXT NOT NULL, + phone TEXT NOT NULL, + firstname TEXT NOT NULL, + lastname TEXT NOT NULL, + email_verified BOOL NOT NULL, + date_created TIMESTAMPTZ NOT NULL DEFAULT NOW(), + status TEXT NOT NULL, + last_login TIMESTAMPTZ NULL DEFAULT NOW(), + salt_id UUID NOT NULL +); + +CREATE TABLE IF NOT EXISTS "salt" ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + salt TEXT NOT NULL ); diff --git a/src/callers/register.rs b/src/callers/register.rs index 1462583..b9d07e4 100644 --- a/src/callers/register.rs +++ b/src/callers/register.rs @@ -1,46 +1,104 @@ use axum::{Json, http::StatusCode}; -use crate::models; +use crate::hashing; use crate::repo; -mod response { +pub mod request { use serde::{Deserialize, Serialize}; - use crate::models; + #[derive(Default, Deserialize, Serialize)] + pub struct Request { + #[serde(skip_serializing_if = "String::is_empty")] + pub username: String, + #[serde(skip_serializing_if = "String::is_empty")] + pub password: String, + #[serde(skip_serializing_if = "String::is_empty")] + pub email: String, + #[serde(skip_serializing_if = "String::is_empty")] + pub phone: String, + #[serde(skip_serializing_if = "String::is_empty")] + pub 
firstname: String, + #[serde(skip_serializing_if = "String::is_empty")] + pub lastname: String, + } +} + +pub mod response { + use serde::{Deserialize, Serialize}; #[derive(Deserialize, Serialize)] pub struct Response { pub message: String, - pub data: models::common::User, + pub data: Vec, } } pub async fn register_user( axum::Extension(pool): axum::Extension, - Json(payload): Json, + Json(payload): Json, ) -> (StatusCode, Json) { - let mut user = models::common::User { + let mut user = icarus_models::user::User { id: uuid::Uuid::nil(), username: payload.username.clone(), password: payload.password.clone(), + email: payload.email.clone(), + phone: payload.phone.clone(), + firstname: payload.firstname.clone(), + lastname: payload.lastname.clone(), + status: String::from("Active"), + email_verified: true, + date_created: Some(time::OffsetDateTime::now_utc()), + last_login: None, + salt_id: uuid::Uuid::nil(), }; - match repo::user::insert(&pool, &user).await { - Ok(id) => { - user.id = id; - ( - StatusCode::CREATED, - Json(response::Response { - message: String::from("User inserted"), - data: user, - }), - ) + match repo::user::exists(&pool, &user.username).await { + Ok(res) => { + if res { + ( + StatusCode::NOT_FOUND, + Json(response::Response { + message: String::from("Error"), + data: vec![user], + }), + ) + } else { + let salt_string = hashing::generate_salt().unwrap(); + let mut salt = icarus_models::user::salt::Salt::default(); + let generated_salt = salt_string; + salt.salt = generated_salt.to_string(); + salt.id = repo::salt::insert(&pool, &salt).await.unwrap(); + user.salt_id = salt.id; + let hashed_password = + hashing::hash_password(&user.password, &generated_salt).unwrap(); + user.password = hashed_password; + + match repo::user::insert(&pool, &user).await { + Ok(id) => { + user.id = id; + ( + StatusCode::CREATED, + Json(response::Response { + message: String::from("User created"), + data: vec![user], + }), + ) + } + Err(err) => ( + StatusCode::BAD_REQUEST, + Json(response::Response { + message: err.to_string(), + data: vec![user], + }), + ), + } + } } Err(err) => ( StatusCode::BAD_REQUEST, Json(response::Response { message: err.to_string(), - data: user, + data: vec![user], }), ), } diff --git a/src/hashing/mod.rs b/src/hashing/mod.rs index eb7ed39..1386d0c 100644 --- a/src/hashing/mod.rs +++ b/src/hashing/mod.rs @@ -8,12 +8,18 @@ use argon2::{ }, }; -pub fn hash_password(password: &String) -> Result { - let password_bytes = password.as_bytes(); - +pub fn generate_salt() -> Result { // Generate a random salt // SaltString::generate uses OsRng internally for cryptographic security let salt = SaltString::generate(&mut OsRng); + Ok(salt) +} + +pub fn hash_password( + password: &String, + salt: &SaltString, +) -> Result { + let password_bytes = password.as_bytes(); // Create an Argon2 instance with default parameters (recommended) // You could customize parameters here if needed, but defaults are strong @@ -22,7 +28,7 @@ pub fn hash_password(password: &String) -> Result match verify_password(&some_password, p.clone()) { Ok(res) => { assert_eq!(res, true); diff --git a/src/lib.rs b/src/lib.rs index 891aac4..1e67995 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,7 +1,6 @@ pub mod callers; pub mod config; pub mod hashing; -pub mod models; pub mod repo; pub mod keys { diff --git a/src/main.rs b/src/main.rs index dfe28f5..da52960 100644 --- a/src/main.rs +++ b/src/main.rs @@ -65,16 +65,9 @@ mod tests { http::{Request, StatusCode}, }; use http_body_util::BodyExt; - use 
serde::{Deserialize, Serialize}; use serde_json::json; use tower::ServiceExt; // for `call`, `oneshot`, and `ready` - #[derive(Deserialize, Serialize)] - struct Response { - pub message: String, - pub data: icarus_auth::models::common::User, - } - mod db_mgr { use std::str::FromStr; @@ -191,14 +184,22 @@ mod tests { let app = init::routes().await.layer(axum::Extension(pool)); - let usr = icarus_auth::models::common::CreateUser { + let usr = icarus_auth::callers::register::request::Request { username: String::from("somethingsss"), password: String::from("Raindown!"), + email: String::from("dev@null.com"), + phone: String::from("1234567890"), + firstname: String::from("Bob"), + lastname: String::from("Smith"), }; let payload = json!({ "username": &usr.username, "password": &usr.password, + "email": &usr.email, + "phone": &usr.phone, + "firstname": &usr.firstname, + "lastname": &usr.lastname, }); let response = app @@ -214,16 +215,27 @@ mod tests { match response { Ok(resp) => { - assert_eq!(resp.status(), StatusCode::CREATED, "Message: {:?}", resp); + assert_eq!( + resp.status(), + StatusCode::CREATED, + "Message: {:?} {:?}", + resp, + usr.username + ); let body = axum::body::to_bytes(resp.into_body(), usize::MAX) .await .unwrap(); - let parsed_body: Response = serde_json::from_slice(&body).unwrap(); + let parsed_body: callers::register::response::Response = + serde_json::from_slice(&body).unwrap(); + let returned_usr = &parsed_body.data[0]; + + assert_eq!(false, returned_usr.id.is_nil(), "Id is not populated"); assert_eq!( - usr.username, parsed_body.data.username, + usr.username, returned_usr.username, "Usernames do not match" ); + assert!(returned_usr.date_created.is_some(), "Date Created is empty"); } Err(err) => { assert!(false, "Error: {:?}", err.to_string()); diff --git a/src/models/common.rs b/src/models/common.rs deleted file mode 100644 index 7b978d5..0000000 --- a/src/models/common.rs +++ /dev/null @@ -1,14 +0,0 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Deserialize, Serialize)] -pub struct CreateUser { - pub username: String, - pub password: String, -} - -#[derive(Deserialize, Serialize)] -pub struct User { - pub id: uuid::Uuid, - pub username: String, - pub password: String, -} diff --git a/src/models/mod.rs b/src/models/mod.rs deleted file mode 100644 index 34994bf..0000000 --- a/src/models/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod common; diff --git a/src/repo/mod.rs b/src/repo/mod.rs index 95d3c7a..049a840 100644 --- a/src/repo/mod.rs +++ b/src/repo/mod.rs @@ -1,20 +1,104 @@ pub mod user { - use crate::models; + use sqlx::Row; + + #[derive(Debug, serde::Serialize, sqlx::FromRow)] + pub struct InsertedData { + pub id: uuid::Uuid, + pub date_created: Option, + } + + pub async fn exists(pool: &sqlx::PgPool, username: &String) -> Result { + let result = sqlx::query( + r#" + SELECT 1 FROM "user" WHERE username = $1 + "#, + ) + .bind(username) + .fetch_optional(pool) + .await; + + match result { + Ok(r) => Ok(r.is_some()), + Err(e) => Err(e), + } + } pub async fn insert( pool: &sqlx::PgPool, - user: &models::common::User, + user: &icarus_models::user::User, ) -> Result { - let insert_sql = "INSERT INTO \"user\" (username, password) VALUES ($1, $2) RETURNING id"; - - match sqlx::query_scalar(insert_sql) - .bind(&user.username) // Bind the input message securely + let row = sqlx::query( + r#" + INSERT INTO "user" (username, password, email, phone, firstname, lastname, email_verified, status, salt_id) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) + RETURNING id, 
date_created; + "#) + .bind(&user.username) .bind(&user.password) - .fetch_one(pool) // Execute and expect exactly ONE row with ONE column back - .await - { - Ok(o) => Ok(o), - Err(err) => Err(err), // _ => uuid::Uuid::nil(), + .bind(&user.email) + .bind(&user.phone) + .bind(&user.firstname) + .bind(&user.lastname) + .bind(user.email_verified) + .bind(&user.status) + .bind(user.salt_id) + .fetch_one(pool) + .await + .map_err(|e| { + eprintln!("Error inserting item: {}", e); + e + })?; + + let result = InsertedData { + id: row.try_get("id").map_err(|_e| sqlx::Error::RowNotFound)?, + date_created: row + .try_get("date_created") + .map_err(|_e| sqlx::Error::RowNotFound)?, + }; + + if !result.id.is_nil() { + Ok(result.id) + } else { + Err(sqlx::Error::RowNotFound) + } + } +} + +pub mod salt { + use sqlx::Row; + + #[derive(Debug, serde::Serialize, sqlx::FromRow)] + pub struct InsertedData { + pub id: uuid::Uuid, + } + + pub async fn insert( + pool: &sqlx::PgPool, + salt: &icarus_models::user::salt::Salt, + ) -> Result { + let row = sqlx::query( + r#" + INSERT INTO "salt" (salt) + VALUES ($1) + RETURNING id; + "#, + ) + .bind(&salt.salt) + .fetch_one(pool) + .await + .map_err(|e| { + eprintln!("Error inserting item: {}", e); + e + })?; + + let result = InsertedData { + id: row.try_get("id").map_err(|_e| sqlx::Error::RowNotFound)?, + }; + + if !result.id.is_nil() { + Ok(result.id) + } else { + Err(sqlx::Error::RowNotFound) } } }
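For a quick end-to-end check of the register flow added above, the sketch below posts a registration payload to the running service with reqwest (already listed under [dev-dependencies]). It is only a sketch: it assumes the server has been started locally on the default address from config::get_full() (0.0.0.0:3000) with a reachable database, and the field values are placeholders borrowed from the test data.

// Hedged sketch: drive the /api/v2/register endpoint over HTTP.
// Assumes `cargo run` is already serving on 0.0.0.0:3000 with a reachable database.
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // Field names mirror callers::register::request::Request; values are placeholders.
    let payload = serde_json::json!({
        "username": "somethingsss",
        "password": "Raindown!",
        "email": "dev@null.com",
        "phone": "1234567890",
        "firstname": "Bob",
        "lastname": "Smith"
    });

    let response = reqwest::Client::new()
        .post("http://localhost:3000/api/v2/register")
        .json(&payload)
        .send()
        .await?;

    // On success the handler answers 201 CREATED with a JSON body holding
    // `message` and a `data` array containing the inserted user (id populated).
    println!("status: {}", response.status());
    println!("body:   {}", response.text().await?);
    Ok(())
}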