Compare commits

...

11 commits

Author SHA1 Message Date
16aada55c5
Add readme
and update container's alpine
2025-06-17 10:58:29 -07:00
ba1a5f728c
cargo fmt 2025-06-17 10:11:50 -07:00
ed715102c0
Docs & sqlx prep: Improved documentation and update sqlx preparation files for offline builds 2025-06-17 10:08:38 -07:00
69b741fe13 Fixed all warnings 2025-06-16 20:05:01 -07:00
623cc2bbaa Added documentation and reformatted files 2025-06-10 19:09:39 -07:00
2b47bb113e Added documentation for various functions 2025-06-10 19:03:35 -07:00
47f8e01210
Auth: Add function to get AuthInfo for Request without using an Extractor 2025-06-10 17:43:46 -07:00
1ac49dbb60 Corrected lock file 2025-06-10 16:25:07 -07:00
806ad1343b Corrected lock file 2025-06-10 16:24:51 -07:00
b3ddae6008 Merge branch 'identity' into dev
# Conflicts:
#	Cargo.lock
#	Cargo.toml
#	src/auth.rs
#	src/main.rs
2025-06-10 16:19:20 -07:00
ed2620c8b8 Feat (Sealing): Encryption of Secrets (#1)
This adds support for encrypting and decrypting secrets.
It implements the APIs required for unsealing.
The APIs are not complete or compliant.

Reviewed-on: #1
Co-authored-by: C0ffeeCode <ritters_werth@outlook.com>
Co-committed-by: C0ffeeCode <ritters_werth@outlook.com>
2025-04-03 10:08:08 +02:00
30 changed files with 862 additions and 555 deletions

View file

@ -0,0 +1,44 @@
{
"db_name": "SQLite",
"query": "SELECT service_token.* FROM service_token, service_token_role_membership\n WHERE service_token.id = service_token_role_membership.token_id AND\n service_token_role_membership.role_name = 'root'\n LIMIT 1",
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "key",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "expiry",
"ordinal": 2,
"type_info": "Integer"
},
{
"name": "parent_id",
"ordinal": 3,
"type_info": "Text"
},
{
"name": "identity_id",
"ordinal": 4,
"type_info": "Text"
}
],
"parameters": {
"Right": 0
},
"nullable": [
false,
false,
true,
true,
true
]
},
"hash": "0aa5c76c9ea1692da29a0f39998946d230f92a8f252294b25afeabe05749f4ca"
}

View file

@ -0,0 +1,44 @@
{
"db_name": "SQLite",
"query": "SELECT * FROM 'service_token' WHERE key = $1 AND (expiry IS NULL OR expiry > $2) LIMIT 1",
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "key",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "expiry",
"ordinal": 2,
"type_info": "Integer"
},
{
"name": "parent_id",
"ordinal": 3,
"type_info": "Text"
},
{
"name": "identity_id",
"ordinal": 4,
"type_info": "Text"
}
],
"parameters": {
"Right": 2
},
"nullable": [
false,
false,
true,
true,
true
]
},
"hash": "2cbe2fbcd5d8fb6d489f9e3cc7e04182f226964ea9d84219abbe6958dcccfefe"
}

View file

@ -0,0 +1,26 @@
{
"db_name": "SQLite",
"query": "SELECT * FROM 'service_token_role_membership' WHERE token_id = $1",
"describe": {
"columns": [
{
"name": "role_name",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "token_id",
"ordinal": 1,
"type_info": "Text"
}
],
"parameters": {
"Right": 1
},
"nullable": [
false,
false
]
},
"hash": "36485bb70f499346cd1be569887ea8b6f438f4f845ef883e80d58875b839500a"
}

View file

@ -0,0 +1,32 @@
{
"db_name": "SQLite",
"query": "SELECT encrypted_key, type as protection_type, nonce FROM root_key ORDER BY version LIMIT 1",
"describe": {
"columns": [
{
"name": "encrypted_key",
"ordinal": 0,
"type_info": "Blob"
},
{
"name": "protection_type",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "nonce",
"ordinal": 2,
"type_info": "Blob"
}
],
"parameters": {
"Right": 0
},
"nullable": [
false,
false,
true
]
},
"hash": "5630a591626bd416be0d1ab12fa993055b521e81382897d247ceee1b41f0bf42"
}

View file

@ -0,0 +1,20 @@
{
"db_name": "SQLite",
"query": "\nWITH latest_version AS (\n SELECT MAX(version_number) AS max_version\n FROM kv2_secret_version\n WHERE engine_path = $1 AND secret_path = $2 -- engine_path AND secret_path\n)\nINSERT INTO kv2_secret_version (engine_path, secret_path, nonce, encrypted_data, created_time, version_number)\nVALUES (\n $1, -- engine_path\n $2, -- secret_path\n $3, -- nonce\n $4, -- encrypted_data\n $5, -- created_time\n CASE -- Use provided version if given\n WHEN $6 IS NOT NULL THEN $6 -- version_number (optional)\n ELSE COALESCE((SELECT max_version FROM latest_version) + 1, 1) -- otherwise 1\n END -- version_number logic\n)\nRETURNING version_number;\n",
"describe": {
"columns": [
{
"name": "version_number",
"ordinal": 0,
"type_info": "Integer"
}
],
"parameters": {
"Right": 6
},
"nullable": [
false
]
},
"hash": "8f7bfd1840d14efec44c7b59ab10461ff122ead43076ad841883a9dd189a4f37"
}

View file

@ -0,0 +1,12 @@
{
"db_name": "SQLite",
"query": "\n INSERT INTO root_key (encrypted_key, type, version, nonce)\n VALUES ($1, $2, 1, $3)\n ",
"describe": {
"columns": [],
"parameters": {
"Right": 3
},
"nullable": []
},
"hash": "aa131c57e0e255bfe07488095bdf25ab39e9dee182d0aecf988c9d3c2d04e66d"
}

View file

@ -1,31 +1,36 @@
{ {
"db_name": "SQLite", "db_name": "SQLite",
"query": "SELECT secret_data, created_time, deletion_time, version_number, secret_path\n FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n ORDER BY version_number DESC LIMIT 1", "query": "SELECT nonce, encrypted_data, created_time, deletion_time, version_number, secret_path\n FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n ORDER BY version_number DESC LIMIT 1",
"describe": { "describe": {
"columns": [ "columns": [
{ {
"name": "secret_data", "name": "nonce",
"ordinal": 0, "ordinal": 0,
"type_info": "Text" "type_info": "Blob"
},
{
"name": "encrypted_data",
"ordinal": 1,
"type_info": "Blob"
}, },
{ {
"name": "created_time", "name": "created_time",
"ordinal": 1,
"type_info": "Datetime"
},
{
"name": "deletion_time",
"ordinal": 2, "ordinal": 2,
"type_info": "Datetime" "type_info": "Datetime"
}, },
{ {
"name": "version_number", "name": "deletion_time",
"ordinal": 3, "ordinal": 3,
"type_info": "Datetime"
},
{
"name": "version_number",
"ordinal": 4,
"type_info": "Integer" "type_info": "Integer"
}, },
{ {
"name": "secret_path", "name": "secret_path",
"ordinal": 4, "ordinal": 5,
"type_info": "Text" "type_info": "Text"
} }
], ],
@ -33,6 +38,7 @@
"Right": 2 "Right": 2
}, },
"nullable": [ "nullable": [
false,
false, false,
false, false,
true, true,
@ -40,5 +46,5 @@
false false
] ]
}, },
"hash": "844de8351a0ed204e2080857373507389c90453b5d3ad92344272838958ab28e" "hash": "b78c62fe22c4e93c54ecbc0c0cdfa31387baf14bea1ac8d27170e8b6cb456114"
} }

View file

@ -1,20 +0,0 @@
{
"db_name": "SQLite",
"query": "\nWITH latest_version AS (\n SELECT MAX(version_number) AS max_version\n FROM kv2_secret_version\n WHERE engine_path = $1 AND secret_path = $2 -- engine_path AND secret_path\n)\nINSERT INTO kv2_secret_version (engine_path, secret_path, secret_data, created_time, version_number)\nVALUES (\n $1, -- engine_path\n $2, -- secret_path\n $3, -- secret_data\n $4, -- created_time\n CASE -- Use provided version if given\n WHEN $5 IS NOT NULL THEN $5 -- version_number (optional)\n ELSE COALESCE((SELECT max_version FROM latest_version) + 1, 0)\n END -- version_number logic\n)\nRETURNING version_number;\n",
"describe": {
"columns": [
{
"name": "version_number",
"ordinal": 0,
"type_info": "Integer"
}
],
"parameters": {
"Right": 5
},
"nullable": [
false
]
},
"hash": "c6beeb7d8672039df5258ada802920aae8f16db215dda5ab447dbe832f4a6703"
}

View file

@ -1,31 +1,36 @@
{ {
"db_name": "SQLite", "db_name": "SQLite",
"query": "SELECT secret_data, created_time, deletion_time, version_number, secret_path\n FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n AND version_number = $3", "query": "SELECT nonce, encrypted_data, created_time, deletion_time, version_number, secret_path\n FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n AND version_number = $3",
"describe": { "describe": {
"columns": [ "columns": [
{ {
"name": "secret_data", "name": "nonce",
"ordinal": 0, "ordinal": 0,
"type_info": "Text" "type_info": "Blob"
},
{
"name": "encrypted_data",
"ordinal": 1,
"type_info": "Blob"
}, },
{ {
"name": "created_time", "name": "created_time",
"ordinal": 1,
"type_info": "Datetime"
},
{
"name": "deletion_time",
"ordinal": 2, "ordinal": 2,
"type_info": "Datetime" "type_info": "Datetime"
}, },
{ {
"name": "version_number", "name": "deletion_time",
"ordinal": 3, "ordinal": 3,
"type_info": "Datetime"
},
{
"name": "version_number",
"ordinal": 4,
"type_info": "Integer" "type_info": "Integer"
}, },
{ {
"name": "secret_path", "name": "secret_path",
"ordinal": 4, "ordinal": 5,
"type_info": "Text" "type_info": "Text"
} }
], ],
@ -33,6 +38,7 @@
"Right": 3 "Right": 3
}, },
"nullable": [ "nullable": [
false,
false, false,
false, false,
true, true,
@ -40,5 +46,5 @@
false false
] ]
}, },
"hash": "919758dd0aee1053065d62d528bca5bbd5220b909b6c1b5eb5c77ce0dd2259e4" "hash": "fa8c74205ae4d497983d394ee04181c08d20cdb4a93bfce3c06a114133cd6619"
} }

View file

@ -0,0 +1,12 @@
{
"db_name": "SQLite",
"query": "\n INSERT INTO service_token (id, key) VALUES ($1, $2);\n INSERT INTO service_token_role_membership (token_id, role_name) VALUES ($3, 'root');\n ",
"describe": {
"columns": [],
"parameters": {
"Right": 3
},
"nullable": []
},
"hash": "fe6bf34448b9f9defc27ce30a128935d991cd06e22861086c3b1377916731e57"
}

589
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,4 @@
ARG alpine_version=3.21 ARG alpine_version=3.22
FROM docker.io/library/rust:1-alpine${alpine_version} AS builder FROM docker.io/library/rust:1-alpine${alpine_version} AS builder

30
README.md Normal file
View file

@ -0,0 +1,30 @@
# rvault
rvault is an open-source implementation of the API of Vault and OpenBao, written in Rust.
## Running
You can run or build offline with `SQLX_OFFLINE=true cargo run` or `SQLX_OFFLINE=true cargo build`.
An offline build requires an up-to-date SQLx preparation.
An OCI container image can be created using `podman build . -t rvault`.
Furthermore, rvault attempts to read a `.env` file in the current working directory.
For example, its content could be:
```txt
DATABASE_URL=sqlite:test.db
RUST_LOG=debug
```
## Development
SQLx preparation can be updated with `cargo sqlx prepare`.
Because offline mode pins compile-time-checked queries to the prepared data, which must be regenerated after every query change, it is not useful for development.
With `cargo sqlx database reset` the database will be recreated,
deleting all contents and reapplying migrations.
This is helpful when changing migrations during development.
When running a normal (non-offline) build, the database must be migrated (e.g. using `cargo sqlx database reset`)
for compilation of compile-time-checked queries.

View file

@ -1,16 +1,14 @@
pub(crate) mod token;
pub mod auth_extractor; pub mod auth_extractor;
pub(crate) mod token;
use axum::Router;
use crate::auth::token::*; use crate::auth::token::*;
use crate::storage::DbPool; use crate::storage::DbPool;
use axum::Router;
// route prefix: `/auth/token/` /// Authentication routes
// mod token;
// use self::token::token_auth_router;
pub fn auth_router(pool: DbPool) -> Router<DbPool> { pub fn auth_router(pool: DbPool) -> Router<DbPool> {
Router::new().nest("/token", token_auth_router(pool.clone())).with_state(pool) // The token auth router handles all token-related authentication routes
// .nest("/token", token_auth_router()) Router::new()
.nest("/token", token_auth_router(pool.clone()))
.with_state(pool)
} }

View file

@ -1,37 +1,66 @@
use std::fmt::Debug; use crate::auth::token::{TokenDTO, get_roles_from_token, get_token_from_key};
use crate::storage::DbPool;
use axum::body::Body;
use axum::extract::FromRequestParts; use axum::extract::FromRequestParts;
use axum::http::request::Parts; use axum::http::request::Parts;
use axum::http::{header, StatusCode}; use axum::http::{HeaderMap, Request, StatusCode, header};
use crate::auth::token::{get_roles_from_token, get_token_from_key, TokenDTO}; use std::fmt::Debug;
use crate::storage::DbPool;
// Currently unused but for usage in the future
#[allow(unused)]
/// AuthInfo is an extractor that retrieves authentication information from the request.
#[derive(Debug)] #[derive(Debug)]
pub struct AuthInfo { pub struct AuthInfo {
token: TokenDTO, token: TokenDTO,
roles: Vec<String>, roles: Vec<String>,
} }
impl<> FromRequestParts<DbPool> for AuthInfo impl FromRequestParts<DbPool> for AuthInfo {
{
type Rejection = StatusCode; type Rejection = StatusCode;
async fn from_request_parts(parts: &mut Parts, state: &DbPool) -> Result<Self, Self::Rejection> { /// Extracts authentication information from the request parts.
let auth_header = parts async fn from_request_parts(
.headers parts: &mut Parts,
state: &DbPool,
) -> Result<Self, Self::Rejection> {
let header = &parts.headers;
inspect_with_header(state, header).await
}
}
// Currently unused but for usage in the future
#[allow(unused)]
/// Extracts the headers from request and returns the result from inspect_with_header function.
pub async fn inspect_req(state: &DbPool, req: &Request<Body>) -> Result<AuthInfo, StatusCode> {
let header = req.headers();
inspect_with_header(state, header).await
}
/// Inspects the request headers and extracts authentication information.
/// Returns an `AuthInfo` struct containing the token and roles if successful.
/// If the authorization header is missing or invalid, it returns a `StatusCode::UNAUTHORIZED`.
///
/// This function is intentionally separated so it can be used from
/// within the Axum extractor as well as in other functions.
pub async fn inspect_with_header(
state: &DbPool,
header: &HeaderMap,
) -> Result<AuthInfo, StatusCode> {
let auth_header = header
.get(header::AUTHORIZATION) .get(header::AUTHORIZATION)
.and_then(|value| value.to_str().ok()); .and_then(|value| value.to_str().ok());
match auth_header { match auth_header {
Some(auth_header) => { Some(auth_value) => {
let token = get_token_from_key(auth_header, state).await; let token = get_token_from_key(auth_value, state).await;
if token.is_err() { if token.is_err() {
return Err(StatusCode::UNAUTHORIZED); return Err(StatusCode::UNAUTHORIZED);
} }
let token = token.unwrap(); let token = token.unwrap();
let roles = get_roles_from_token(&token, state).await; let roles = get_roles_from_token(&token, state).await;
Ok(Self {token, roles}) Ok(AuthInfo { token, roles })
}
_ => Err(StatusCode::UNAUTHORIZED),
} }
None => Err(StatusCode::UNAUTHORIZED),
} }
} }

View file

@ -1,26 +1,28 @@
use std::ops::Index; // There are some placeholder functions, that will have to be implemented before the first release.
use axum::extract::{Path, Query, State}; // They are marked with `todo!()` to indicate that they need to be implemented.
use axum::{Json, Router}; // We want to keep these functions in the codebase.
use axum::response::{IntoResponse, NoContent, Response}; // That is why we choose to suppress unused warnings for now.
use axum::routing::post; // TODO
use log::error; #![allow(unused)]
use serde::Deserialize;
use sqlx::Error;
use rand::{distributions::Alphanumeric, Rng};
use uuid::Uuid;
use crate::storage::DbPool; use crate::storage::DbPool;
use axum::extract::State;
use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};
use axum::routing::post;
use axum::{Json, Router};
use log::error;
use rand::{Rng, distributions::Alphanumeric};
use serde::{Deserialize, Serialize};
use sqlx::Error;
use uuid::Uuid;
enum TokenType { #[derive(Debug, Serialize)]
}
#[derive(Debug)]
pub struct IdentityDTO { pub struct IdentityDTO {
id: String, id: String,
name: String name: String,
} }
#[derive(Debug)] #[derive(Debug)]
pub struct TokenDTO { pub struct TokenDTO {
key: String, key: String,
@ -36,11 +38,27 @@ pub struct TokenRoleMembershipDTO {
token_id: String, token_id: String,
} }
/// Represents a request body for the `/auth/token/lookup` endpoint.
#[derive(Deserialize)] #[derive(Deserialize)]
struct RequestBodyPostLookup { struct RequestBodyPostLookup {
token: String, token: String,
} }
/// Represents the response body for the `/auth/token/lookup` endpoint.
#[derive(Serialize)]
struct TokenLookupResponse {
id: String,
type_name: String,
roles: Vec<String>,
}
/// Represents an error response for the API.
#[derive(Serialize)]
struct ErrorResponse {
error: String,
}
/// Generates a random string of the specified length using alphanumeric characters.
// TODO: Make string generation secure // TODO: Make string generation secure
fn get_random_string(len: usize) -> String { fn get_random_string(len: usize) -> String {
rand::thread_rng() rand::thread_rng()
@ -50,47 +68,62 @@ fn get_random_string(len: usize) -> String {
.collect() .collect()
} }
// Returns if a token was created or not. Prints out the created token to the console. /// Creates a root token if none exists in the database.
/// Returns true if a new root token was created, false if one already exists.
pub async fn create_root_token_if_none_exist(pool: &DbPool) -> bool { pub async fn create_root_token_if_none_exist(pool: &DbPool) -> bool {
// Check if a root token already exists
let exists = sqlx::query!( let exists = sqlx::query!(
r#"SELECT service_token.* FROM service_token, service_token_role_membership r#"SELECT service_token.* FROM service_token, service_token_role_membership
WHERE service_token.id = service_token_role_membership.token_id AND WHERE service_token.id = service_token_role_membership.token_id AND
service_token_role_membership.role_name = 'root' service_token_role_membership.role_name = 'root'
LIMIT 1"#).fetch_one(pool).await LIMIT 1"#
)
.fetch_one(pool)
.await
.is_ok(); .is_ok();
if exists { if exists {
return false; return false;
} }
// If no root token exists, create one
let result = create_root_token(pool).await; let result = create_root_token(pool).await;
if result.is_err() { if result.is_err() {
let error = result.err().unwrap(); let error = result.err().unwrap();
error!("create_root_token failed: {:?}", error); // Log the error and panic
panic!("create_root_token failed: {:?}", error); error!("create_root_token failed: {error:?}");
panic!("create_root_token failed: {error:?}");
} }
// If successful, print the root token. This will only happen once.
println!("\n\nYour root token is: {}", result.unwrap()); println!("\n\nYour root token is: {}", result.unwrap());
println!("It will only be displayed once!\n\n"); println!("It will only be displayed once!\n\n");
true true
} }
// Return the token key if successful /// Creates a root token in the database.
async fn create_root_token(pool: &DbPool) -> Result<String, Error> { async fn create_root_token(pool: &DbPool) -> Result<String, Error> {
let id = Uuid::new_v4().to_string(); let id = Uuid::new_v4().to_string();
let key = "s.".to_string() + &get_random_string(24); let key = "s.".to_string() + &get_random_string(24);
// Insert the root token into the database
let result = sqlx::query!(r#" let result = sqlx::query!(r#"
INSERT INTO service_token (id, key) VALUES ($1, $2); INSERT INTO service_token (id, key) VALUES ($1, $2);
INSERT INTO service_token_role_membership (token_id, role_name) VALUES ($3, 'root'); INSERT INTO service_token_role_membership (token_id, role_name) VALUES ($3, 'root');
"#, id, key, id).execute(pool).await; "#, id, key, id).execute(pool).await;
// If the insert was successful, return the key
if result.is_ok() { if result.is_ok() {
return Ok(key); return Ok(key);
} }
// Else, return the error
Err(result.unwrap_err()) Err(result.unwrap_err())
} }
// Gets the current time in seconds since unix epoch /// Gets the current time in seconds since unix epoch
fn get_time_as_int() -> i64 { fn get_time_as_int() -> i64 {
std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs() as i64 std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_secs() as i64
} }
/// Gets the type of token. (The first character of the key always specifies the type)
fn get_token_type(token: &TokenDTO) -> Result<String, &str> { fn get_token_type(token: &TokenDTO) -> Result<String, &str> {
Ok(match token.key.clone().chars().next().unwrap_or('?') { Ok(match token.key.clone().chars().next().unwrap_or('?') {
's' => "service", 's' => "service",
@ -100,9 +133,13 @@ fn get_token_type(token: &TokenDTO) -> Result<String, &str> {
error!("Unsupported token type"); error!("Unsupported token type");
return Err("Unsupported token type"); return Err("Unsupported token type");
} }
}.to_string()) }
.to_string())
} }
/// Retrieves a token from the database using its key.
/// If the token is found and not expired, it returns the token.
/// Else, it returns an error.
pub async fn get_token_from_key(token_key: &str, pool: &DbPool) -> Result<TokenDTO, Error> { pub async fn get_token_from_key(token_key: &str, pool: &DbPool) -> Result<TokenDTO, Error> {
let time = get_time_as_int(); let time = get_time_as_int();
sqlx::query_as!( sqlx::query_as!(
@ -111,65 +148,139 @@ pub async fn get_token_from_key(token_key: &str, pool: &DbPool) -> Result<TokenD
token_key, time).fetch_one(pool).await token_key, time).fetch_one(pool).await
} }
pub async fn get_roles_from_token(token: &TokenDTO, pool:&DbPool) -> Vec<String> { /// Retrieves the roles associated with a given token from the database.
/// If the token does not exist, it returns an empty vector.
pub async fn get_roles_from_token(token: &TokenDTO, pool: &DbPool) -> Vec<String> {
let result = sqlx::query_as!( let result = sqlx::query_as!(
TokenRoleMembershipDTO, TokenRoleMembershipDTO,
r#"SELECT * FROM 'service_token_role_membership' WHERE token_id = $1"#, r#"SELECT * FROM 'service_token_role_membership' WHERE token_id = $1"#,
token.id).fetch_all(pool).await; token.id
result.unwrap_or(Vec::new()).iter().map(|r| r.role_name.to_string()).collect() )
.fetch_all(pool)
.await;
result
.unwrap_or(Vec::new())
.iter()
.map(|r| r.role_name.to_string())
.collect()
} }
/// Return a router, that may be used to route traffic to the corresponding handlers
pub fn token_auth_router(pool: DbPool) -> Router<DbPool> { pub fn token_auth_router(pool: DbPool) -> Router<DbPool> {
Router::new() Router::new()
.route("/lookup", post(post_lookup)) .route("/lookup", post(post_lookup))
.with_state(pool) .with_state(pool)
} }
/// Handles the `/auth/token/lookup` endpoint.
/// Retrieves the token and its associated roles from the database using the provided token key.
/// The output format does not yet match the openBao specification and is for testing only!
async fn post_lookup( async fn post_lookup(
State(pool): State<DbPool>, State(pool): State<DbPool>,
Json(body): Json<RequestBodyPostLookup> Json(body): Json<RequestBodyPostLookup>,
) -> Result<Response, ()> { ) -> Response {
let token = body.token; let token_str = body.token;
// Validate the token string
Ok(IntoResponse::into_response(token)) match get_token_from_key(&token_str, &pool).await {
// If the token is found, retrieve its type and roles
Ok(token) => {
let type_name = get_token_type(&token).unwrap_or_else(|_| String::from("Unknown"));
let roles = get_roles_from_token(&token, &pool).await;
let resp = TokenLookupResponse {
id: token.id,
type_name,
roles,
};
// Return the token information as a JSON response
(StatusCode::OK, axum::Json(resp)).into_response()
}
// If the token is not found, return a 404 Not Found error
Err(e) => {
error!("Failed to retrieve token: {e:?}");
let err = ErrorResponse {
error: "Failed to retrieve token".to_string(),
};
(StatusCode::NOT_FOUND, axum::Json(err)).into_response()
}
}
} }
async fn get_accessors() {} //
// The following functions are placeholders for the various token-related operations.
//
async fn post_create() {} async fn get_accessors() -> &'static str {
todo!("not implemented")
}
async fn post_create_orphan() {} async fn post_create() -> &'static str {
todo!("not implemented")
}
async fn post_create_role() {} async fn post_create_orphan() -> &'static str {
todo!("not implemented")
}
async fn get_lookup() {} async fn post_create_role() -> &'static str {
todo!("not implemented")
}
async fn get_lookup() -> &'static str {
todo!("not implemented")
}
async fn get_lookup_self() {} async fn get_lookup_self() -> &'static str {
todo!("not implemented")
}
async fn post_lookup_self() {} async fn post_lookup_self() -> &'static str {
todo!("not implemented")
}
async fn post_renew() {} async fn post_renew() -> &'static str {
todo!("not implemented")
}
async fn post_renew_accessor() {} async fn post_renew_accessor() -> &'static str {
todo!("not implemented")
}
async fn post_renew_self() {} async fn post_renew_self() -> &'static str {
todo!("not implemented")
}
async fn post_revoke() {} async fn post_revoke() -> &'static str {
todo!("not implemented")
}
async fn post_revoke_accessor() {} async fn post_revoke_accessor() -> &'static str {
todo!("not implemented")
}
async fn post_revoke_orphan() {} async fn post_revoke_orphan() -> &'static str {
todo!("not implemented")
}
async fn post_revoke_self() {} async fn post_revoke_self() -> &'static str {
todo!("not implemented")
}
async fn get_roles() {} async fn get_roles() -> &'static str {
todo!("not implemented")
}
async fn get_role_by_name() {} async fn get_role_by_name() -> &'static str {
todo!("not implemented")
}
async fn post_role_by_name() {} async fn post_role_by_name() -> &'static str {
todo!("not implemented")
}
async fn delete_role_by_name() {} async fn delete_role_by_name() -> &'static str {
todo!("not implemented")
}
async fn post_tidy() {} async fn post_tidy() -> &'static str {
todo!("not implemented")
}

View file

@ -1,8 +1,8 @@
use axum::{ use axum::{
Json,
body::Body, body::Body,
http::StatusCode, http::StatusCode,
response::{IntoResponse, Response}, response::{IntoResponse, Response},
Json,
}; };
use serde::Serialize; use serde::Serialize;
@ -13,22 +13,11 @@ pub struct HttpError {
} }
impl HttpError { impl HttpError {
pub fn new(status_code: StatusCode, errors: Vec<String>) -> Response<Body> { pub fn multiple_errors(status_code: StatusCode, errors: Vec<String>) -> Response<Body> {
(status_code, Json(HttpError { errors })).into_response() (status_code, Json(HttpError { errors })).into_response()
} }
pub fn simple(status_code: StatusCode, error: impl ToString) -> Response<Body> { pub fn simple(status_code: StatusCode, error: impl ToString) -> Response<Body> {
HttpError::new(status_code, vec![error.to_string(); 1]) HttpError::multiple_errors(status_code, vec![error.to_string(); 1])
}
}
/// Custom serialization function for `secret_data`
pub fn serialize_reject_none<S>(value: &Option<String>, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match value {
Some(data) => serializer.serialize_str(data),
None => Err(serde::ser::Error::custom("`secret_data` must not be None during serialization!")),
} }
} }

View file

@ -1,11 +1,11 @@
pub mod kv; pub mod kv;
use axum::{ use axum::{
Extension, Router,
body::Body, body::Body,
extract::{Request, State}, extract::{Request, State},
http::{StatusCode, Uri}, http::{StatusCode, Uri},
response::{IntoResponse, Response}, response::{IntoResponse, Response},
Extension, Router,
}; };
use log::*; use log::*;
use tower::Service; use tower::Service;
@ -14,7 +14,7 @@ use crate::{common::HttpError, storage::DbPool};
#[derive(Clone)] #[derive(Clone)]
/// State to be used to store the database pool /// State to be used to store the database pool
/// and the routers for each engine /// and the routers for each engine.
struct EngineMapperState { struct EngineMapperState {
pool: DbPool, pool: DbPool,
kv_v2: Router, kv_v2: Router,
@ -23,7 +23,8 @@ struct EngineMapperState {
#[derive(Clone)] #[derive(Clone)]
struct EnginePath(String); struct EnginePath(String);
/// Secret engine router /// Secret engine router.
/// Dynamically puts requests into routers depending on database content.
pub fn secrets_router(pool: DbPool) -> Router<DbPool> { pub fn secrets_router(pool: DbPool) -> Router<DbPool> {
// State containing the pool and engine routers // State containing the pool and engine routers
let state = EngineMapperState { let state = EngineMapperState {
@ -42,7 +43,7 @@ async fn engine_handler(
req: Request, req: Request,
) -> Response<Body> { ) -> Response<Body> {
if let Some((mount_path, engine_type)) = map_mount_points(req.uri(), &engines.pool).await { if let Some((mount_path, engine_type)) = map_mount_points(req.uri(), &engines.pool).await {
info!("Found mount point {} of type {}", mount_path, engine_type); info!("Found mount point {mount_path} of type {engine_type}");
// Match the engine type to the appropriate router // Match the engine type to the appropriate router
match engine_type.as_str() { match engine_type.as_str() {
"kv_v2" => call_router(engines.kv_v2, mount_path, req).await, "kv_v2" => call_router(engines.kv_v2, mount_path, req).await,
@ -72,7 +73,7 @@ async fn call_router(engine: Router, mount_path: String, mut req: Request) -> Re
/// Occurs when the mount path is found in the database /// Occurs when the mount path is found in the database
/// but the registered is unknown /// but the registered is unknown
fn unknown_engine(engine_type: String) -> impl IntoResponse { fn unknown_engine(engine_type: String) -> impl IntoResponse {
error!("Engine type {} not implemented", engine_type); error!("Engine type {engine_type} not implemented");
HttpError::simple( HttpError::simple(
StatusCode::INTERNAL_SERVER_ERROR, StatusCode::INTERNAL_SERVER_ERROR,
format!("Engine type {engine_type} not implemented"), format!("Engine type {engine_type} not implemented"),

View file

@ -1,15 +1,12 @@
mod structs;
mod data; mod data;
mod meta; mod meta;
mod structs;
// #[cfg(test)] // #[cfg(test)]
// mod tests; // mod tests;
use crate::storage::DbPool; use crate::storage::DbPool;
use axum::{ use axum::{Router, routing::*};
Router,
routing::*,
};
pub fn kv_router(pool: DbPool) -> Router { pub fn kv_router(pool: DbPool) -> Router {
Router::new() Router::new()

View file

@ -1,8 +1,19 @@
// There are some placeholder functions, that will have to be implemented before the first release.
// They are marked with `todo!()` to indicate that they need to be implemented.
// We want to keep these functions in the codebase.
// That is why we choose to suppress unused warnings for now.
// TODO
#![allow(unused)]
use super::structs::KvV2WriteRequest; use super::structs::KvV2WriteRequest;
use crate::{ use crate::{
common::HttpError, engines::{ DbPool,
kv::structs::{KvSecretData, KvSecretRes, KvV2WriteResponse, Wrapper}, EnginePath common::HttpError,
}, storage::sealing::Secret, DbPool engines::{
EnginePath,
kv::structs::{KvSecretData, KvSecretRes, KvV2WriteResponse, Wrapper},
},
storage::sealing::Secret,
}; };
use axum::{ use axum::{
Extension, Json, Extension, Json,
@ -121,7 +132,10 @@ pub async fn post_data(
let content = serde_json::to_string(&secret.data).unwrap(); let content = serde_json::to_string(&secret.data).unwrap();
let Secret { nonce, protected_data } = Secret::encrypt(&content).await.unwrap(); let Secret {
nonce,
protected_data,
} = Secret::encrypt(&content).await.unwrap();
let nonce = nonce.as_slice(); let nonce = nonce.as_slice();
let mut tx = pool.begin().await.unwrap(); let mut tx = pool.begin().await.unwrap();
@ -234,7 +248,6 @@ pub async fn patch_data(
Path(kv_path): Path<String>, Path(kv_path): Path<String>,
Extension(EnginePath(engine_path)): Extension<EnginePath>, Extension(EnginePath(engine_path)): Extension<EnginePath>,
Json(secret): Json<KvV2WriteRequest>, Json(secret): Json<KvV2WriteRequest>,
) -> Result<Response, ()> { ) -> &'static str {
// TODO: implement only application/merge-patch+json todo!("not implemented")
todo!("Not implemented")
} }

View file

@ -1,3 +1,10 @@
// There are some placeholder functions, that will have to be implemented before the first release.
// They are marked with `todo!()` to indicate that they need to be implemented.
// We want to keep these functions in the codebase.
// That is why we choose to suppress unused warnings for now.
// TODO
#![allow(unused)]
use crate::storage::DbPool; use crate::storage::DbPool;
use axum::extract::{Path, State}; use axum::extract::{Path, State};

View file

@ -1,3 +1,9 @@
// There are some placeholder functions, that will have to be implemented before the first release.
// They are marked with `todo!()` to indicate that they need to be implemented.
// We want to keep these functions in the codebase.
// That is why we choose to suppress unused warnings for now.
#![allow(unused)]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::{collections::HashMap, vec}; use std::{collections::HashMap, vec};
use time::{OffsetDateTime, UtcDateTime, serde::rfc3339}; use time::{OffsetDateTime, UtcDateTime, serde::rfc3339};

View file

@ -1,66 +0,0 @@
use std::collections::HashMap;
use chrono::Utc;
use tests::{
logic::patch_metadata,
structs::{SecretMeta, VersionMeta},
};
use super::*;
#[test]
#[cfg(target_feature = "_disabled")]
fn print_serialized_test() {
let temp_secret = TempSecret {
content: String::from("Hallo"),
version: 12,
};
let serialized = serialize_secret_json(&temp_secret);
println!("string serialized: {:?}", serialized);
let deserialized = deserialize_secret_struct(&serialized.unwrap());
println!(
"Struct field from deserialized: {}",
deserialized.unwrap().content
)
}
#[test]
#[cfg(target_feature = "_disabled")]
fn test_patching() {
// TODO add more assertions
let mut base = create_mock_meta();
println!("OLD metadata: {:?}", base);
let overwrite: SecretMeta = SecretMeta {
max_versions: 10,
versions: vec![VersionMeta {
created_time: Utc::now(),
deletion_time: Some(Utc::now()),
destroyed: true,
}],
cas_required: true,
delete_version_after: "10m".to_string(),
current_version: 4,
oldest_version: 2,
updated_time: Utc::now(),
created_time: Utc::now(),
custom_metadata: Some(HashMap::new()),
};
let mut patched: Option<SecretMeta> = None; // Laurenz here
match patch_metadata(&mut base, &overwrite) {
Ok(meta) => {
println!("NEW metadata: {:?}", meta);
println!("patched successfully");
patched = Some(meta);
}
Err(e) => {
log::error!("error patching metadata: {}", e);
panic!("Patching failed");
}
}
if let Some(patched_meta) = patched {
assert!(patched_meta.current_version == 4);
assert!(patched_meta.versions[0].destroyed == true);
} else {
panic!("patched was not initialized");
}
}

View file

@ -1,5 +1,12 @@
#![forbid(unsafe_code)] #![forbid(unsafe_code)]
// // There are some placeholder functions, that will have to be implemented before the first release.
// // They are marked with `todo!()` to indicate that they need to be implemented.
// // We want to keep these functions in the codebase.
// // That is why we choose to suppress unused warnings for now.
// #![allow(unused)]
use crate::common::HttpError;
use axum::{ use axum::{
Router, Router,
extract::Request, extract::Request,
@ -12,8 +19,6 @@ use log::*;
use std::{env, net::SocketAddr, str::FromStr}; use std::{env, net::SocketAddr, str::FromStr};
use storage::DbPool; use storage::DbPool;
use tokio::{net::TcpListener, signal}; use tokio::{net::TcpListener, signal};
use crate::auth::auth_extractor::AuthInfo;
use crate::common::HttpError;
mod auth; mod auth;
mod common; mod common;
@ -24,11 +29,8 @@ mod sys;
#[tokio::main] #[tokio::main]
async fn main() { async fn main() {
// NOTE: Rethink choice of environment variables in regards to security in the future
let _ = dotenvy::dotenv(); let _ = dotenvy::dotenv();
// To be configured via environment variables
// choose from (highest to lowest): error, warn, info, debug, trace, off
// env::set_var("RUST_LOG", "trace"); // TODO: Remove to respect user configuration
// env::set_var("DATABASE_URL", "sqlite:test.db"); // TODO: move to .env
env_logger::init(); env_logger::init();
// Listen on all IPv4 and IPv6 interfaces on port 8200 by default // Listen on all IPv4 and IPv6 interfaces on port 8200 by default
@ -65,13 +67,15 @@ async fn main() {
.unwrap(); .unwrap();
} }
/// Middleware setting unspecified `Content-Type`s to json since this is done by client libraries.
/// Axum's [axum::extract::Json] rejects extraction attempts without json content type.
async fn set_default_content_type_json( async fn set_default_content_type_json(
mut req: Request, mut req: Request,
next: Next, next: Next,
) -> Result<impl IntoResponse, Response> { ) -> Result<impl IntoResponse, Response> {
if req.headers().get("content-type").is_none() { if req.headers().get("content-type").is_none() {
let headers = req.headers_mut(); let headers = req.headers_mut();
// debug!("Request header: \n{:?}", headers);
headers.insert("content-type", "application/json".parse().unwrap()); headers.insert("content-type", "application/json".parse().unwrap());
} }
@ -105,6 +109,7 @@ async fn shutdown_signal(pool: DbPool) {
} }
/// Fallback route for unknown routes /// Fallback route for unknown routes
///
/// Note: `/v1/*` is handled by [`engines::secrets_router`] /// Note: `/v1/*` is handled by [`engines::secrets_router`]
async fn fallback_route_unknown(req: Request) -> Response { async fn fallback_route_unknown(req: Request) -> Response {
log::error!( log::error!(
@ -117,8 +122,8 @@ async fn fallback_route_unknown(req: Request) -> Response {
HttpError::simple(StatusCode::NOT_FOUND, "Route not implemented") HttpError::simple(StatusCode::NOT_FOUND, "Route not implemented")
} }
/// basic handler that responds with a static string /// Basic handler that responds with a static string
async fn root(test: AuthInfo) -> &'static str { async fn root() -> &'static str {
println!("AuthInfo: {test:?}"); info!("Hello world");
"Hello, World!" "Hello, World!"
} }

View file

@ -3,11 +3,18 @@ pub mod sealing;
use std::{fs::File, path::Path}; use std::{fs::File, path::Path};
use log::*; use log::*;
use sqlx::{sqlite::SqlitePoolOptions, Pool, Sqlite}; use sqlx::{Pool, Sqlite, sqlite::SqlitePoolOptions};
pub(crate) type DbType = Sqlite; pub(crate) type DbType = Sqlite;
pub(crate) type DbPool = Pool<DbType>; pub(crate) type DbPool = Pool<DbType>;
/// Creates a SQLx SQLite database pool.
/// If nonexistent, it creates a new SQLite file.
///
/// Note: rvault uses compile-time queries.
/// Hence, during development a migrated SQLite file is required.
/// Use `cargo sqlx database reset` if required.
/// Otherwise, set the env var `SQLX_OFFLINE=true` during compilation (not helpful for development).
pub async fn create_pool(db_url: String) -> DbPool { pub async fn create_pool(db_url: String) -> DbPool {
// Create SQLite database file if it does not exist // Create SQLite database file if it does not exist
if db_url.starts_with("sqlite:") && db_url != ("sqlite::memory:") { if db_url.starts_with("sqlite:") && db_url != ("sqlite::memory:") {

View file

@ -212,10 +212,6 @@ impl Secret {
/// Encrypt a secret /// Encrypt a secret
/// ///
/// # Panics
///
/// Panics if .
///
/// # Errors /// # Errors
/// ///
/// This function will return an error if the vault is uninitialized or an unknown error occurs. /// This function will return an error if the vault is uninitialized or an unknown error occurs.
@ -321,11 +317,11 @@ pub async fn init_default(pool: &DbPool) {
#[cfg(feature = "shamir")] #[cfg(feature = "shamir")]
{ {
shamir::init_shamir(&pool, 2, 5).await shamir::init_shamir(pool, 2, 5).await
} }
}; };
let success = prepare_unseal(&pool).await; let success = prepare_unseal(pool).await;
warn!("New sealing password generated: {user_key:?}"); warn!("New sealing password generated: {user_key:?}");
assert!( assert!(
success, success,

View file

@ -15,7 +15,7 @@ use zeroize::ZeroizeOnDrop;
use crate::DbPool; use crate::DbPool;
use super::{write_new_root_key, Sealing, UnsealResult}; use super::{Sealing, UnsealResult, write_new_root_key};
type P256Share = DefaultShare<IdentifierPrimeField<Scalar>, IdentifierPrimeField<Scalar>>; type P256Share = DefaultShare<IdentifierPrimeField<Scalar>, IdentifierPrimeField<Scalar>>;
@ -29,6 +29,8 @@ struct ShamirPortion {
} }
#[derive(PartialEq)] #[derive(PartialEq)]
/// Container for multiple [ShamirPortion]s and the protected root key.
/// Multiple instances could exist in the future for per-namespace encryption.
pub struct ShamirBucket { pub struct ShamirBucket {
portions: Vec<ShamirPortion>, portions: Vec<ShamirPortion>,
protected_rk: Vec<u8>, protected_rk: Vec<u8>,
@ -66,7 +68,7 @@ impl Sealing for ShamirBucket {
} }
self.portions.push(key_portion); self.portions.push(key_portion);
let abc = match join_keys(&self.portions) { let joined_keys = match join_keys(&self.portions) {
Ok(v) => v, Ok(v) => v,
Err(e) => { Err(e) => {
return match e { return match e {
@ -84,7 +86,7 @@ impl Sealing for ShamirBucket {
} }
.to_bytes(); .to_bytes();
let cipher = match Aes256GcmSiv::new_from_slice(&abc) { let cipher = match Aes256GcmSiv::new_from_slice(&joined_keys) {
Ok(v) => v, Ok(v) => v,
Err(e) => { Err(e) => {
info!("Cipher could not be created from slice: {e}"); info!("Cipher could not be created from slice: {e}");

View file

@ -6,7 +6,7 @@ use base64::{Engine, prelude::BASE64_STANDARD};
use crate::DbPool; use crate::DbPool;
use super::{write_new_root_key, Sealing, UnsealResult}; use super::{Sealing, UnsealResult, write_new_root_key};
/// Pair of protected root key and nonce /// Pair of protected root key and nonce
#[derive(PartialEq)] #[derive(PartialEq)]
@ -26,6 +26,7 @@ impl Sealing for SimpleSealing {
} }
} }
/// Initialize the vault with a simple password
#[allow(unused)] #[allow(unused)]
pub async fn init_simple(pool: &DbPool) -> String { pub async fn init_simple(pool: &DbPool) -> String {
let root_key = Aes256GcmSiv::generate_key(&mut OsRng); let root_key = Aes256GcmSiv::generate_key(&mut OsRng);

View file

@ -5,8 +5,10 @@ use crate::DbPool;
pub fn root_generation() -> Router<DbPool> { pub fn root_generation() -> Router<DbPool> {
Router::new() Router::new()
// .route("/generate-root", get(get_root_generation_attempt)) // .route("/generate-root", get(get_root_generation_attempt))
.route("/generate-root", post(generate_new_root))
// .route("/generate-root", delete(cancel_generate_root)) // .route("/generate-root", delete(cancel_generate_root))
.route("/generate-root", post(generate_new_root))
} }
async fn generate_new_root() {} async fn generate_new_root() {
todo!()
}

View file

@ -1,5 +1,7 @@
use axum::{ use axum::{
extract::State, routing::{get, post, put}, Json, Router Json, Router,
extract::State,
routing::{get, post, put},
}; };
use log::warn; use log::warn;
use serde::Deserialize; use serde::Deserialize;
@ -11,7 +13,7 @@ pub fn sealing_routes() -> Router<DbPool> {
.route("/seal", post(seal_post)) .route("/seal", post(seal_post))
.route("/seal-status", get(seal_status_get)) .route("/seal-status", get(seal_status_get))
.route("/unseal", post(unseal_post)) .route("/unseal", post(unseal_post))
// WTF? Again? Its supposed to be POST but actually a PUT // Again? Its supposed to be POST but actually a PUT
.route("/unseal", put(unseal_post)) .route("/unseal", put(unseal_post))
} }
@ -47,4 +49,6 @@ async fn unseal_post(State(pool): State<DbPool>, Json(req): Json<UnsealRequest>)
Ok(()) Ok(())
} }
async fn seal_status_get(State(pool): State<DbPool>) {} async fn seal_status_get(State(_pool): State<DbPool>) -> &'static str {
todo!("not implemented")
}