Docs & sqlx prep: Improve documentation and update sqlx preparation files for offline builds

This commit is contained in:
Laurenz 2025-06-17 10:08:38 -07:00
parent 69b741fe13
commit ed715102c0
Signed by: C0ffeeCode
SSH key fingerprint: SHA256:prvFOyBjButRypyXm7X8lbbCkly2Dq1PF7e/mrsPVjw
25 changed files with 292 additions and 144 deletions

View file

@ -0,0 +1,44 @@
{
"db_name": "SQLite",
"query": "SELECT service_token.* FROM service_token, service_token_role_membership\n WHERE service_token.id = service_token_role_membership.token_id AND\n service_token_role_membership.role_name = 'root'\n LIMIT 1",
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "key",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "expiry",
"ordinal": 2,
"type_info": "Integer"
},
{
"name": "parent_id",
"ordinal": 3,
"type_info": "Text"
},
{
"name": "identity_id",
"ordinal": 4,
"type_info": "Text"
}
],
"parameters": {
"Right": 0
},
"nullable": [
false,
false,
true,
true,
true
]
},
"hash": "0aa5c76c9ea1692da29a0f39998946d230f92a8f252294b25afeabe05749f4ca"
}

View file

@ -0,0 +1,44 @@
{
"db_name": "SQLite",
"query": "SELECT * FROM 'service_token' WHERE key = $1 AND (expiry IS NULL OR expiry > $2) LIMIT 1",
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "key",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "expiry",
"ordinal": 2,
"type_info": "Integer"
},
{
"name": "parent_id",
"ordinal": 3,
"type_info": "Text"
},
{
"name": "identity_id",
"ordinal": 4,
"type_info": "Text"
}
],
"parameters": {
"Right": 2
},
"nullable": [
false,
false,
true,
true,
true
]
},
"hash": "2cbe2fbcd5d8fb6d489f9e3cc7e04182f226964ea9d84219abbe6958dcccfefe"
}

View file

@ -0,0 +1,26 @@
{
"db_name": "SQLite",
"query": "SELECT * FROM 'service_token_role_membership' WHERE token_id = $1",
"describe": {
"columns": [
{
"name": "role_name",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "token_id",
"ordinal": 1,
"type_info": "Text"
}
],
"parameters": {
"Right": 1
},
"nullable": [
false,
false
]
},
"hash": "36485bb70f499346cd1be569887ea8b6f438f4f845ef883e80d58875b839500a"
}

View file

@ -0,0 +1,32 @@
{
"db_name": "SQLite",
"query": "SELECT encrypted_key, type as protection_type, nonce FROM root_key ORDER BY version LIMIT 1",
"describe": {
"columns": [
{
"name": "encrypted_key",
"ordinal": 0,
"type_info": "Blob"
},
{
"name": "protection_type",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "nonce",
"ordinal": 2,
"type_info": "Blob"
}
],
"parameters": {
"Right": 0
},
"nullable": [
false,
false,
true
]
},
"hash": "5630a591626bd416be0d1ab12fa993055b521e81382897d247ceee1b41f0bf42"
}

View file

@ -0,0 +1,20 @@
{
"db_name": "SQLite",
"query": "\nWITH latest_version AS (\n SELECT MAX(version_number) AS max_version\n FROM kv2_secret_version\n WHERE engine_path = $1 AND secret_path = $2 -- engine_path AND secret_path\n)\nINSERT INTO kv2_secret_version (engine_path, secret_path, nonce, encrypted_data, created_time, version_number)\nVALUES (\n $1, -- engine_path\n $2, -- secret_path\n $3, -- nonce\n $4, -- encrypted_data\n $5, -- created_time\n CASE -- Use provided version if given\n WHEN $6 IS NOT NULL THEN $6 -- version_number (optional)\n ELSE COALESCE((SELECT max_version FROM latest_version) + 1, 1) -- otherwise 1\n END -- version_number logic\n)\nRETURNING version_number;\n",
"describe": {
"columns": [
{
"name": "version_number",
"ordinal": 0,
"type_info": "Integer"
}
],
"parameters": {
"Right": 6
},
"nullable": [
false
]
},
"hash": "8f7bfd1840d14efec44c7b59ab10461ff122ead43076ad841883a9dd189a4f37"
}

View file

@ -0,0 +1,12 @@
{
"db_name": "SQLite",
"query": "\n INSERT INTO root_key (encrypted_key, type, version, nonce)\n VALUES ($1, $2, 1, $3)\n ",
"describe": {
"columns": [],
"parameters": {
"Right": 3
},
"nullable": []
},
"hash": "aa131c57e0e255bfe07488095bdf25ab39e9dee182d0aecf988c9d3c2d04e66d"
}

View file

@ -1,31 +1,36 @@
{ {
"db_name": "SQLite", "db_name": "SQLite",
"query": "SELECT secret_data, created_time, deletion_time, version_number, secret_path\n FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n ORDER BY version_number DESC LIMIT 1", "query": "SELECT nonce, encrypted_data, created_time, deletion_time, version_number, secret_path\n FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n ORDER BY version_number DESC LIMIT 1",
"describe": { "describe": {
"columns": [ "columns": [
{ {
"name": "secret_data", "name": "nonce",
"ordinal": 0, "ordinal": 0,
"type_info": "Text" "type_info": "Blob"
},
{
"name": "encrypted_data",
"ordinal": 1,
"type_info": "Blob"
}, },
{ {
"name": "created_time", "name": "created_time",
"ordinal": 1,
"type_info": "Datetime"
},
{
"name": "deletion_time",
"ordinal": 2, "ordinal": 2,
"type_info": "Datetime" "type_info": "Datetime"
}, },
{ {
"name": "version_number", "name": "deletion_time",
"ordinal": 3, "ordinal": 3,
"type_info": "Datetime"
},
{
"name": "version_number",
"ordinal": 4,
"type_info": "Integer" "type_info": "Integer"
}, },
{ {
"name": "secret_path", "name": "secret_path",
"ordinal": 4, "ordinal": 5,
"type_info": "Text" "type_info": "Text"
} }
], ],
@ -33,6 +38,7 @@
"Right": 2 "Right": 2
}, },
"nullable": [ "nullable": [
false,
false, false,
false, false,
true, true,
@ -40,5 +46,5 @@
false false
] ]
}, },
"hash": "844de8351a0ed204e2080857373507389c90453b5d3ad92344272838958ab28e" "hash": "b78c62fe22c4e93c54ecbc0c0cdfa31387baf14bea1ac8d27170e8b6cb456114"
} }

View file

@ -1,20 +0,0 @@
{
"db_name": "SQLite",
"query": "\nWITH latest_version AS (\n SELECT MAX(version_number) AS max_version\n FROM kv2_secret_version\n WHERE engine_path = $1 AND secret_path = $2 -- engine_path AND secret_path\n)\nINSERT INTO kv2_secret_version (engine_path, secret_path, secret_data, created_time, version_number)\nVALUES (\n $1, -- engine_path\n $2, -- secret_path\n $3, -- secret_data\n $4, -- created_time\n CASE -- Use provided version if given\n WHEN $5 IS NOT NULL THEN $5 -- version_number (optional)\n ELSE COALESCE((SELECT max_version FROM latest_version) + 1, 0)\n END -- version_number logic\n)\nRETURNING version_number;\n",
"describe": {
"columns": [
{
"name": "version_number",
"ordinal": 0,
"type_info": "Integer"
}
],
"parameters": {
"Right": 5
},
"nullable": [
false
]
},
"hash": "c6beeb7d8672039df5258ada802920aae8f16db215dda5ab447dbe832f4a6703"
}

View file

@ -1,31 +1,36 @@
{ {
"db_name": "SQLite", "db_name": "SQLite",
"query": "SELECT secret_data, created_time, deletion_time, version_number, secret_path\n FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n AND version_number = $3", "query": "SELECT nonce, encrypted_data, created_time, deletion_time, version_number, secret_path\n FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n AND version_number = $3",
"describe": { "describe": {
"columns": [ "columns": [
{ {
"name": "secret_data", "name": "nonce",
"ordinal": 0, "ordinal": 0,
"type_info": "Text" "type_info": "Blob"
},
{
"name": "encrypted_data",
"ordinal": 1,
"type_info": "Blob"
}, },
{ {
"name": "created_time", "name": "created_time",
"ordinal": 1,
"type_info": "Datetime"
},
{
"name": "deletion_time",
"ordinal": 2, "ordinal": 2,
"type_info": "Datetime" "type_info": "Datetime"
}, },
{ {
"name": "version_number", "name": "deletion_time",
"ordinal": 3, "ordinal": 3,
"type_info": "Datetime"
},
{
"name": "version_number",
"ordinal": 4,
"type_info": "Integer" "type_info": "Integer"
}, },
{ {
"name": "secret_path", "name": "secret_path",
"ordinal": 4, "ordinal": 5,
"type_info": "Text" "type_info": "Text"
} }
], ],
@ -33,6 +38,7 @@
"Right": 3 "Right": 3
}, },
"nullable": [ "nullable": [
false,
false, false,
false, false,
true, true,
@ -40,5 +46,5 @@
false false
] ]
}, },
"hash": "919758dd0aee1053065d62d528bca5bbd5220b909b6c1b5eb5c77ce0dd2259e4" "hash": "fa8c74205ae4d497983d394ee04181c08d20cdb4a93bfce3c06a114133cd6619"
} }

View file

@ -0,0 +1,12 @@
{
"db_name": "SQLite",
"query": "\n INSERT INTO service_token (id, key) VALUES ($1, $2);\n INSERT INTO service_token_role_membership (token_id, role_name) VALUES ($3, 'root');\n ",
"describe": {
"columns": [],
"parameters": {
"Right": 3
},
"nullable": []
},
"hash": "fe6bf34448b9f9defc27ce30a128935d991cd06e22861086c3b1377916731e57"
}

View file

@ -6,6 +6,8 @@ use axum::http::request::Parts;
use axum::http::{HeaderMap, Request, StatusCode, header}; use axum::http::{HeaderMap, Request, StatusCode, header};
use std::fmt::Debug; use std::fmt::Debug;
// Currently unused but for usage in the future
#[allow(unused)]
/// AuthInfo is an extractor that retrieves authentication information from the request. /// AuthInfo is an extractor that retrieves authentication information from the request.
#[derive(Debug)] #[derive(Debug)]
pub struct AuthInfo { pub struct AuthInfo {
@ -13,10 +15,10 @@ pub struct AuthInfo {
roles: Vec<String>, roles: Vec<String>,
} }
/// Extracts authentication information from the request parts.
impl FromRequestParts<DbPool> for AuthInfo { impl FromRequestParts<DbPool> for AuthInfo {
type Rejection = StatusCode; type Rejection = StatusCode;
/// Extracts authentication information from the request parts.
async fn from_request_parts( async fn from_request_parts(
parts: &mut Parts, parts: &mut Parts,
state: &DbPool, state: &DbPool,
@ -27,6 +29,8 @@ impl FromRequestParts<DbPool> for AuthInfo {
} }
} }
// Currently unused but for usage in the future
#[allow(unused)]
/// Extracts the headers from request and returns the result from inspect_with_header function. /// Extracts the headers from request and returns the result from inspect_with_header function.
pub async fn inspect_req(state: &DbPool, req: &Request<Body>) -> Result<AuthInfo, StatusCode> { pub async fn inspect_req(state: &DbPool, req: &Request<Body>) -> Result<AuthInfo, StatusCode> {
let header = req.headers(); let header = req.headers();
@ -36,6 +40,9 @@ pub async fn inspect_req(state: &DbPool, req: &Request<Body>) -> Result<AuthInfo
/// Inspects the request headers and extracts authentication information. /// Inspects the request headers and extracts authentication information.
/// Returns an `AuthInfo` struct containing the token and roles if successful. /// Returns an `AuthInfo` struct containing the token and roles if successful.
/// If the authorization header is missing or invalid, it returns a `StatusCode::UNAUTHORIZED`. /// If the authorization header is missing or invalid, it returns a `StatusCode::UNAUTHORIZED`.
///
/// This function is intentionally separated so it can be used from
/// within the Axum extractor as well as in other functions.
pub async fn inspect_with_header( pub async fn inspect_with_header(
state: &DbPool, state: &DbPool,
header: &HeaderMap, header: &HeaderMap,

View file

@ -1,3 +1,10 @@
// There are some placeholder functions that will have to be implemented before the first release.
// They are marked with `todo!()` to indicate that they need to be implemented.
// We want to keep these functions in the codebase.
// That is why we choose to suppress unused warnings for now.
// TODO
#![allow(unused)]
use crate::storage::DbPool; use crate::storage::DbPool;
use axum::extract::State; use axum::extract::State;
use axum::http::StatusCode; use axum::http::StatusCode;

View file

@ -13,22 +13,11 @@ pub struct HttpError {
} }
impl HttpError { impl HttpError {
pub fn simple_with_status(status_code: StatusCode, errors: Vec<String>) -> Response<Body> { pub fn multiple_errors(status_code: StatusCode, errors: Vec<String>) -> Response<Body> {
(status_code, Json(HttpError { errors })).into_response() (status_code, Json(HttpError { errors })).into_response()
} }
pub fn simple(status_code: StatusCode, error: impl ToString) -> Response<Body> { pub fn simple(status_code: StatusCode, error: impl ToString) -> Response<Body> {
HttpError::simple_with_status(status_code, vec![error.to_string(); 1]) HttpError::multiple_errors(status_code, vec![error.to_string(); 1])
}
}
/// Custom serialization function for `secret_data`
pub fn serialize_reject_none<S>(value: &Option<String>, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match value {
Some(data) => serializer.serialize_str(data),
None => Err(serde::ser::Error::custom("`secret_data` must not be None during serialization!")),
} }
} }

View file

@ -14,7 +14,7 @@ use crate::{common::HttpError, storage::DbPool};
#[derive(Clone)] #[derive(Clone)]
/// State to be used to store the database pool /// State to be used to store the database pool
/// and the routers for each engine /// and the routers for each engine.
struct EngineMapperState { struct EngineMapperState {
pool: DbPool, pool: DbPool,
kv_v2: Router, kv_v2: Router,
@ -23,7 +23,8 @@ struct EngineMapperState {
#[derive(Clone)] #[derive(Clone)]
struct EnginePath(String); struct EnginePath(String);
/// Secret engine router /// Secret engine router.
/// Dynamically puts requests into routers depending on database content.
pub fn secrets_router(pool: DbPool) -> Router<DbPool> { pub fn secrets_router(pool: DbPool) -> Router<DbPool> {
// State containing the pool and engine routers // State containing the pool and engine routers
let state = EngineMapperState { let state = EngineMapperState {

View file

@ -1,3 +1,10 @@
// There are some placeholder functions that will have to be implemented before the first release.
// They are marked with `todo!()` to indicate that they need to be implemented.
// We want to keep these functions in the codebase.
// That is why we choose to suppress unused warnings for now.
// TODO
#![allow(unused)]
use super::structs::KvV2WriteRequest; use super::structs::KvV2WriteRequest;
use crate::{ use crate::{
common::HttpError, engines::{ common::HttpError, engines::{

View file

@ -1,3 +1,10 @@
// There are some placeholder functions that will have to be implemented before the first release.
// They are marked with `todo!()` to indicate that they need to be implemented.
// We want to keep these functions in the codebase.
// That is why we choose to suppress unused warnings for now.
// TODO
#![allow(unused)]
use crate::storage::DbPool; use crate::storage::DbPool;
use axum::extract::{Path, State}; use axum::extract::{Path, State};

View file

@ -1,3 +1,9 @@
// There are some placeholder functions that will have to be implemented before the first release.
// They are marked with `todo!()` to indicate that they need to be implemented.
// We want to keep these functions in the codebase.
// That is why we choose to suppress unused warnings for now.
#![allow(unused)]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::{collections::HashMap, vec}; use std::{collections::HashMap, vec};
use time::{OffsetDateTime, UtcDateTime, serde::rfc3339}; use time::{OffsetDateTime, UtcDateTime, serde::rfc3339};

View file

@ -1,66 +0,0 @@
use std::collections::HashMap;
use chrono::Utc;
use tests::{
logic::patch_metadata,
structs::{SecretMeta, VersionMeta},
};
use super::*;
#[test]
#[cfg(target_feature = "_disabled")]
fn print_serialized_test() {
let temp_secret = TempSecret {
content: String::from("Hallo"),
version: 12,
};
let serialized = serialize_secret_json(&temp_secret);
println!("string serialized: {:?}", serialized);
let deserialized = deserialize_secret_struct(&serialized.unwrap());
println!(
"Struct field from deserialized: {}",
deserialized.unwrap().content
)
}
#[test]
#[cfg(target_feature = "_disabled")]
fn test_patching() {
// TODO add more assertions
let mut base = create_mock_meta();
println!("OLD metadata: {:?}", base);
let overwrite: SecretMeta = SecretMeta {
max_versions: 10,
versions: vec![VersionMeta {
created_time: Utc::now(),
deletion_time: Some(Utc::now()),
destroyed: true,
}],
cas_required: true,
delete_version_after: "10m".to_string(),
current_version: 4,
oldest_version: 2,
updated_time: Utc::now(),
created_time: Utc::now(),
custom_metadata: Some(HashMap::new()),
};
let mut patched: Option<SecretMeta> = None; // Laurenz here
match patch_metadata(&mut base, &overwrite) {
Ok(meta) => {
println!("NEW metadata: {:?}", meta);
println!("patched successfully");
patched = Some(meta);
}
Err(e) => {
log::error!("error patching metadata: {}", e);
panic!("Patching failed");
}
}
if let Some(patched_meta) = patched {
assert!(patched_meta.current_version == 4);
assert!(patched_meta.versions[0].destroyed == true);
} else {
panic!("patched was not initialized");
}
}

View file

@ -1,10 +1,10 @@
#![forbid(unsafe_code)] #![forbid(unsafe_code)]
// There are some placeholder functions, that will have to be implemented before the first release. // // There are some placeholder functions, that will have to be implemented before the first release.
// They are marked with `todo!()` to indicate that they need to be implemented. // // They are marked with `todo!()` to indicate that they need to be implemented.
// We want to keep these functions in the codebase. // // We want to keep these functions in the codebase.
// That is why we choose to suppress unused warnings for now. // // That is why we choose to suppress unused warnings for now.
#![allow(unused)] // #![allow(unused)]
use crate::common::HttpError; use crate::common::HttpError;
use axum::{ use axum::{
@ -29,11 +29,8 @@ mod sys;
#[tokio::main] #[tokio::main]
async fn main() { async fn main() {
// NOTE: Rethink choice of environment variables in regards to security in the future
let _ = dotenvy::dotenv(); let _ = dotenvy::dotenv();
// To be configured via environment variables
// choose from (highest to lowest): error, warn, info, debug, trace, off
// env::set_var("RUST_LOG", "trace"); // TODO: Remove to respect user configuration
// env::set_var("DATABASE_URL", "sqlite:test.db"); // TODO: move to .env
env_logger::init(); env_logger::init();
// Listen on all IPv4 and IPv6 interfaces on port 8200 by default // Listen on all IPv4 and IPv6 interfaces on port 8200 by default
@ -70,13 +67,15 @@ async fn main() {
.unwrap(); .unwrap();
} }
/// Middleware setting unspecified `Content-Type`s to json since this is done by client libraries.
/// Axum's [axum::extract::Json] rejects extraction attempts without json content type.
async fn set_default_content_type_json( async fn set_default_content_type_json(
mut req: Request, mut req: Request,
next: Next, next: Next,
) -> Result<impl IntoResponse, Response> { ) -> Result<impl IntoResponse, Response> {
if req.headers().get("content-type").is_none() { if req.headers().get("content-type").is_none() {
let headers = req.headers_mut(); let headers = req.headers_mut();
// debug!("Request header: \n{:?}", headers);
headers.insert("content-type", "application/json".parse().unwrap()); headers.insert("content-type", "application/json".parse().unwrap());
} }
@ -110,6 +109,7 @@ async fn shutdown_signal(pool: DbPool) {
} }
/// Fallback route for unknown routes /// Fallback route for unknown routes
///
/// Note: `/v1/*` is handled by [`engines::secrets_router`] /// Note: `/v1/*` is handled by [`engines::secrets_router`]
async fn fallback_route_unknown(req: Request) -> Response { async fn fallback_route_unknown(req: Request) -> Response {
log::error!( log::error!(
@ -122,7 +122,7 @@ async fn fallback_route_unknown(req: Request) -> Response {
HttpError::simple(StatusCode::NOT_FOUND, "Route not implemented") HttpError::simple(StatusCode::NOT_FOUND, "Route not implemented")
} }
/// basic handler that responds with a static string /// Basic handler that responds with a static string
async fn root() -> &'static str { async fn root() -> &'static str {
info!("Hello world"); info!("Hello world");
"Hello, World!" "Hello, World!"

View file

@ -8,6 +8,13 @@ use sqlx::{sqlite::SqlitePoolOptions, Pool, Sqlite};
pub(crate) type DbType = Sqlite; pub(crate) type DbType = Sqlite;
pub(crate) type DbPool = Pool<DbType>; pub(crate) type DbPool = Pool<DbType>;
/// Creates a SQLx SQLite database pool.
/// If nonexistent, it creates a new SQLite file.
///
/// Note: rvault uses compile-time queries.
/// Hence, during development a migrated SQLite file is required.
/// Use `cargo sqlx database reset` if required.
/// Otherwise, set the env var `SQLX_OFFLINE=true` during compilation (not helpful for development).
pub async fn create_pool(db_url: String) -> DbPool { pub async fn create_pool(db_url: String) -> DbPool {
// Create SQLite database file if it does not exist // Create SQLite database file if it does not exist
if db_url.starts_with("sqlite:") && db_url != ("sqlite::memory:") { if db_url.starts_with("sqlite:") && db_url != ("sqlite::memory:") {

View file

@ -212,10 +212,6 @@ impl Secret {
/// Encrypt a secret /// Encrypt a secret
/// ///
/// # Panics
///
/// Panics if .
///
/// # Errors /// # Errors
/// ///
/// This function will return an error if the vault is uninitialized or an unknown error occurs. /// This function will return an error if the vault is uninitialized or an unknown error occurs.

View file

@ -29,6 +29,8 @@ struct ShamirPortion {
} }
#[derive(PartialEq)] #[derive(PartialEq)]
/// Container for multiple [ShamirPortion]s and the protected root key.
/// Multiple instances could exist in the future for per-namespace encryption.
pub struct ShamirBucket { pub struct ShamirBucket {
portions: Vec<ShamirPortion>, portions: Vec<ShamirPortion>,
protected_rk: Vec<u8>, protected_rk: Vec<u8>,
@ -66,7 +68,7 @@ impl Sealing for ShamirBucket {
} }
self.portions.push(key_portion); self.portions.push(key_portion);
let abc = match join_keys(&self.portions) { let joined_keys = match join_keys(&self.portions) {
Ok(v) => v, Ok(v) => v,
Err(e) => { Err(e) => {
return match e { return match e {
@ -84,7 +86,7 @@ impl Sealing for ShamirBucket {
} }
.to_bytes(); .to_bytes();
let cipher = match Aes256GcmSiv::new_from_slice(&abc) { let cipher = match Aes256GcmSiv::new_from_slice(&joined_keys) {
Ok(v) => v, Ok(v) => v,
Err(e) => { Err(e) => {
info!("Cipher could not be created from slice: {e}"); info!("Cipher could not be created from slice: {e}");

View file

@ -26,6 +26,7 @@ impl Sealing for SimpleSealing {
} }
} }
/// Initialize the vault with a simple password
#[allow(unused)] #[allow(unused)]
pub async fn init_simple(pool: &DbPool) -> String { pub async fn init_simple(pool: &DbPool) -> String {
let root_key = Aes256GcmSiv::generate_key(&mut OsRng); let root_key = Aes256GcmSiv::generate_key(&mut OsRng);

View file

@ -9,4 +9,6 @@ pub fn root_generation() -> Router<DbPool> {
// .route("/generate-root", delete(cancel_generate_root)) // .route("/generate-root", delete(cancel_generate_root))
} }
async fn generate_new_root() {} async fn generate_new_root() {
todo!()
}

View file

@ -11,7 +11,7 @@ pub fn sealing_routes() -> Router<DbPool> {
.route("/seal", post(seal_post)) .route("/seal", post(seal_post))
.route("/seal-status", get(seal_status_get)) .route("/seal-status", get(seal_status_get))
.route("/unseal", post(unseal_post)) .route("/unseal", post(unseal_post))
// WTF? Again? It's supposed to be POST but is actually a PUT // Again? It's supposed to be POST but is actually a PUT
.route("/unseal", put(unseal_post)) .route("/unseal", put(unseal_post))
} }
@ -47,6 +47,6 @@ async fn unseal_post(State(pool): State<DbPool>, Json(req): Json<UnsealRequest>)
Ok(()) Ok(())
} }
async fn seal_status_get(State(pool): State<DbPool>) -> &'static str { async fn seal_status_get(State(_pool): State<DbPool>) -> &'static str {
todo!("not implemented") todo!("not implemented")
} }