Compare commits
3 commits

| Author | SHA1 | Date |
|---|---|---|
|  | b04461c885 |  |
|  | 2c5919a972 |  |
|  | 169dcd9811 |  |

37 changed files with 1259 additions and 1027 deletions
@@ -1,44 +0,0 @@
-{
-  "db_name": "SQLite",
-  "query": "SELECT service_token.* FROM service_token, service_token_role_membership\n WHERE service_token.id = service_token_role_membership.token_id AND\n service_token_role_membership.role_name = 'root'\n LIMIT 1",
-  "describe": {
-    "columns": [
-      {
-        "name": "id",
-        "ordinal": 0,
-        "type_info": "Text"
-      },
-      {
-        "name": "key",
-        "ordinal": 1,
-        "type_info": "Text"
-      },
-      {
-        "name": "expiry",
-        "ordinal": 2,
-        "type_info": "Integer"
-      },
-      {
-        "name": "parent_id",
-        "ordinal": 3,
-        "type_info": "Text"
-      },
-      {
-        "name": "identity_id",
-        "ordinal": 4,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 0
-    },
-    "nullable": [
-      false,
-      false,
-      true,
-      true,
-      true
-    ]
-  },
-  "hash": "0aa5c76c9ea1692da29a0f39998946d230f92a8f252294b25afeabe05749f4ca"
-}
@@ -1,44 +0,0 @@
-{
-  "db_name": "SQLite",
-  "query": "SELECT * FROM 'service_token' WHERE key = $1 AND (expiry IS NULL OR expiry > $2) LIMIT 1",
-  "describe": {
-    "columns": [
-      {
-        "name": "id",
-        "ordinal": 0,
-        "type_info": "Text"
-      },
-      {
-        "name": "key",
-        "ordinal": 1,
-        "type_info": "Text"
-      },
-      {
-        "name": "expiry",
-        "ordinal": 2,
-        "type_info": "Integer"
-      },
-      {
-        "name": "parent_id",
-        "ordinal": 3,
-        "type_info": "Text"
-      },
-      {
-        "name": "identity_id",
-        "ordinal": 4,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 2
-    },
-    "nullable": [
-      false,
-      false,
-      true,
-      true,
-      true
-    ]
-  },
-  "hash": "2cbe2fbcd5d8fb6d489f9e3cc7e04182f226964ea9d84219abbe6958dcccfefe"
-}
@@ -1,26 +0,0 @@
-{
-  "db_name": "SQLite",
-  "query": "SELECT * FROM 'service_token_role_membership' WHERE token_id = $1",
-  "describe": {
-    "columns": [
-      {
-        "name": "role_name",
-        "ordinal": 0,
-        "type_info": "Text"
-      },
-      {
-        "name": "token_id",
-        "ordinal": 1,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false,
-      false
-    ]
-  },
-  "hash": "36485bb70f499346cd1be569887ea8b6f438f4f845ef883e80d58875b839500a"
-}
@@ -1,32 +0,0 @@
-{
-  "db_name": "SQLite",
-  "query": "SELECT encrypted_key, type as protection_type, nonce FROM root_key ORDER BY version LIMIT 1",
-  "describe": {
-    "columns": [
-      {
-        "name": "encrypted_key",
-        "ordinal": 0,
-        "type_info": "Blob"
-      },
-      {
-        "name": "protection_type",
-        "ordinal": 1,
-        "type_info": "Text"
-      },
-      {
-        "name": "nonce",
-        "ordinal": 2,
-        "type_info": "Blob"
-      }
-    ],
-    "parameters": {
-      "Right": 0
-    },
-    "nullable": [
-      false,
-      false,
-      true
-    ]
-  },
-  "hash": "5630a591626bd416be0d1ab12fa993055b521e81382897d247ceee1b41f0bf42"
-}
@@ -1,36 +1,31 @@
 {
   "db_name": "SQLite",
-  "query": "SELECT nonce, encrypted_data, created_time, deletion_time, version_number, secret_path\n FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n ORDER BY version_number DESC LIMIT 1",
+  "query": "SELECT secret_data, created_time, deletion_time, version_number, secret_path\n FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n ORDER BY version_number DESC LIMIT 1",
   "describe": {
     "columns": [
       {
-        "name": "nonce",
+        "name": "secret_data",
         "ordinal": 0,
-        "type_info": "Blob"
-      },
-      {
-        "name": "encrypted_data",
-        "ordinal": 1,
-        "type_info": "Blob"
+        "type_info": "Text"
       },
       {
         "name": "created_time",
-        "ordinal": 2,
+        "ordinal": 1,
         "type_info": "Datetime"
       },
       {
         "name": "deletion_time",
-        "ordinal": 3,
+        "ordinal": 2,
         "type_info": "Datetime"
       },
       {
         "name": "version_number",
-        "ordinal": 4,
+        "ordinal": 3,
         "type_info": "Integer"
       },
       {
         "name": "secret_path",
-        "ordinal": 5,
+        "ordinal": 4,
         "type_info": "Text"
       }
     ],
@@ -38,7 +33,6 @@
       "Right": 2
     },
     "nullable": [
      false,
      false,
-      false,
      true,
@@ -46,5 +40,5 @@
      false
    ]
  },
-  "hash": "b78c62fe22c4e93c54ecbc0c0cdfa31387baf14bea1ac8d27170e8b6cb456114"
+  "hash": "844de8351a0ed204e2080857373507389c90453b5d3ad92344272838958ab28e"
 }
@@ -1,20 +0,0 @@
-{
-  "db_name": "SQLite",
-  "query": "\nWITH latest_version AS (\n SELECT MAX(version_number) AS max_version\n FROM kv2_secret_version\n WHERE engine_path = $1 AND secret_path = $2 -- engine_path AND secret_path\n)\nINSERT INTO kv2_secret_version (engine_path, secret_path, nonce, encrypted_data, created_time, version_number)\nVALUES (\n $1, -- engine_path\n $2, -- secret_path\n $3, -- nonce\n $4, -- encrypted_data\n $5, -- created_time\n CASE -- Use provided version if given\n WHEN $6 IS NOT NULL THEN $6 -- version_number (optional)\n ELSE COALESCE((SELECT max_version FROM latest_version) + 1, 1) -- otherwise 1\n END -- version_number logic\n)\nRETURNING version_number;\n",
-  "describe": {
-    "columns": [
-      {
-        "name": "version_number",
-        "ordinal": 0,
-        "type_info": "Integer"
-      }
-    ],
-    "parameters": {
-      "Right": 6
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "8f7bfd1840d14efec44c7b59ab10461ff122ead43076ad841883a9dd189a4f37"
-}
@@ -1,36 +1,31 @@
 {
   "db_name": "SQLite",
-  "query": "SELECT nonce, encrypted_data, created_time, deletion_time, version_number, secret_path\n FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n AND version_number = $3",
+  "query": "SELECT secret_data, created_time, deletion_time, version_number, secret_path\n FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n AND version_number = $3",
   "describe": {
     "columns": [
       {
-        "name": "nonce",
+        "name": "secret_data",
         "ordinal": 0,
-        "type_info": "Blob"
-      },
-      {
-        "name": "encrypted_data",
-        "ordinal": 1,
-        "type_info": "Blob"
+        "type_info": "Text"
       },
       {
         "name": "created_time",
-        "ordinal": 2,
+        "ordinal": 1,
         "type_info": "Datetime"
       },
       {
         "name": "deletion_time",
-        "ordinal": 3,
+        "ordinal": 2,
         "type_info": "Datetime"
       },
       {
         "name": "version_number",
-        "ordinal": 4,
+        "ordinal": 3,
         "type_info": "Integer"
       },
       {
         "name": "secret_path",
-        "ordinal": 5,
+        "ordinal": 4,
         "type_info": "Text"
       }
     ],
@@ -38,7 +33,6 @@
      "Right": 3
    },
    "nullable": [
      false,
      false,
-      false,
      true,
@@ -46,5 +40,5 @@
      false
    ]
  },
-  "hash": "fa8c74205ae4d497983d394ee04181c08d20cdb4a93bfce3c06a114133cd6619"
+  "hash": "919758dd0aee1053065d62d528bca5bbd5220b909b6c1b5eb5c77ce0dd2259e4"
 }
@@ -1,12 +0,0 @@
-{
-  "db_name": "SQLite",
-  "query": "\n INSERT INTO root_key (encrypted_key, type, version, nonce)\n VALUES ($1, $2, 1, $3)\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 3
-    },
-    "nullable": []
-  },
-  "hash": "aa131c57e0e255bfe07488095bdf25ab39e9dee182d0aecf988c9d3c2d04e66d"
-}
@@ -0,0 +1,20 @@
+{
+  "db_name": "SQLite",
+  "query": "\nWITH latest_version AS (\n SELECT MAX(version_number) AS max_version\n FROM kv2_secret_version\n WHERE engine_path = $1 AND secret_path = $2 -- engine_path AND secret_path\n)\nINSERT INTO kv2_secret_version (engine_path, secret_path, secret_data, created_time, version_number)\nVALUES (\n $1, -- engine_path\n $2, -- secret_path\n $3, -- secret_data\n $4, -- created_time\n CASE -- Use provided version if given\n WHEN $5 IS NOT NULL THEN $5 -- version_number (optional)\n ELSE COALESCE((SELECT max_version FROM latest_version) + 1, 0)\n END -- version_number logic\n)\nRETURNING version_number;\n",
+  "describe": {
+    "columns": [
+      {
+        "name": "version_number",
+        "ordinal": 0,
+        "type_info": "Integer"
+      }
+    ],
+    "parameters": {
+      "Right": 5
+    },
+    "nullable": [
+      false
+    ]
+  },
+  "hash": "c6beeb7d8672039df5258ada802920aae8f16db215dda5ab447dbe832f4a6703"
+}
@@ -1,12 +0,0 @@
-{
-  "db_name": "SQLite",
-  "query": "\n INSERT INTO service_token (id, key) VALUES ($1, $2);\n INSERT INTO service_token_role_membership (token_id, role_name) VALUES ($3, 'root');\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 3
-    },
-    "nullable": []
-  },
-  "hash": "fe6bf34448b9f9defc27ce30a128935d991cd06e22861086c3b1377916731e57"
-}
Cargo.lock (generated, 743 changes)
File diff suppressed because it is too large
Cargo.toml (12 changes)
@ -9,6 +9,10 @@ default = ["shamir"]
|
|||
insecure-dev-sealing = []
|
||||
shamir = ["vsss-rs", "p256"]
|
||||
|
||||
[lib]
|
||||
proc-macro = true
|
||||
path = "src/macros.rs"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.27"
|
||||
env_logger = "0.11.7"
|
||||
|
|
@ -36,8 +40,12 @@ sqlx = { version = "0.8.3", features = [
|
|||
aes-gcm-siv = "0.11.1"
|
||||
vsss-rs = { version = "5.1.0", optional = true, default-features = false, features = ["zeroize", "std"] }
|
||||
p256 = { version = "0.13.2", optional = true, default-features = false, features = ["std", "ecdsa"] }
|
||||
rand = "0.8.5"
|
||||
uuid = { version = "1.16.0", features = ["v4"] }
|
||||
blake3 = { version = "1.8.2" }
|
||||
bincode = { version = "2.0.1", features = ["serde"] }
|
||||
ed25519 = { version = "2.2.3", features = ["serde"] }
|
||||
ed25519-dalek = { version = "2.1.1", features = ["rand_core"] }
|
||||
syn = "2.0.101"
|
||||
quote = "1.0.40"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
|
@@ -1,4 +1,4 @@
-ARG alpine_version=3.22
+ARG alpine_version=3.21
 
 FROM docker.io/library/rust:1-alpine${alpine_version} AS builder
 
README.md (30 changes)
@@ -1,30 +0,0 @@
-
-# rvault
-
-rvault is an open-source implementation of the API of Vault and OpenBao, written in Rust.
-
-## Running
-
-You can run an offline build with `SQLX_OFFLINE=true cargo run` or `build`, respectively.
-An offline build requires an up-to-date SQLx preparation.
-
-An OCI container image can be created using `podman build . -t rvault`.
-
-Furthermore, rvault attempts to read a `.env` file in the current working directory.
-For example, its content could be:
-
-```txt
-DATABASE_URL=sqlite:test.db
-RUST_LOG=debug
-```
-
-## Development
-
-SQLx preparation can be updated with `cargo sqlx prep`.
-Hence, it is not useful for development.
-With `cargo sqlx database reset` the database will be recreated,
-deleting all contents and reapplying migrations.
-This is helpful when changing migrations during development.
-
-When running a normal, not-offline, build, the database must be migrated (e.g. using `cargo sqlx database reset`)
-for compilation of compile-time-checked queries.
@@ -1,15 +1,20 @@
 -- Add migration script here
 
+CREATE TABLE kv2_engine_cfg (
+    engine_path TEXT PRIMARY KEY REFERENCES secret_engines (mount_point),
+    max_versions UNSIGNED INTEGER CHECK ( max_versions > 0 ), -- Shall be proper NULL if 0
+    max_age_secs UNSIGNED INTEGER CHECK ( max_versions > 0 ), -- Shall be proper NULL if 0
+    cas_required BOOLEAN NOT NULL DEFAULT (FALSE)
+);
+
 CREATE TABLE kv2_metadata (
-    engine_path TEXT NOT NULL,
+    engine_path TEXT NOT NULL REFERENCES secret_engines (mount_point),
     secret_path TEXT NOT NULL,
 
     cas_required INTEGER NOT NULL, -- no bool datatype in sqlite
     created_time TIMESTAMP NOT NULL,
-    delete_version_after TEXT, -- Maybe NOT NULL
+    delete_version_after TEXT, -- May be NULL
     max_versions INTEGER NOT NULL,
     -- current_version INTEGER NOT NULL,
     -- oldest_version INTEGER NOT NULL,
     updated_time TIMESTAMP NOT NULL,
     custom_data TEXT,
 
@@ -27,6 +32,8 @@ CREATE TABLE kv2_secret_version (
     encrypted_data BLOB NOT NULL,
     nonce BLOB NOT NULL CHECK ( length(nonce) = 12 ),
 
+    signature BLOB NOT NULL,
+
     PRIMARY KEY (engine_path, secret_path, version_number),
     FOREIGN KEY (engine_path, secret_path) REFERENCES kv2_metadata(engine_path, secret_path)
 );
@@ -1,25 +0,0 @@
-CREATE TABLE identity (
-    id TEXT PRIMARY KEY NOT NULL,
-    name TEXT NOT NULL
-);
-
-CREATE TABLE service_token_role_membership (
-    role_name TEXT NOT NULL,
-    token_id TEXT NOT NULL
-        REFERENCES service_token(id)
-            ON DELETE CASCADE
-            ON UPDATE CASCADE,
-    PRIMARY KEY (role_name, token_id)
-);
-
-CREATE TABLE service_token (
-    id TEXT PRIMARY KEY NOT NULL,
-    key TEXT NOT NULL,
-    expiry INTEGER,
-    parent_id TEXT NULL REFERENCES service_token(id)
-        ON DELETE NO ACTION
-        ON UPDATE CASCADE,
-    identity_id TEXT NULL REFERENCES identity(id)
-        ON DELETE CASCADE
-        ON UPDATE CASCADE
-);
src/auth.rs (19 changes)
@@ -1,14 +1,13 @@
-pub mod auth_extractor;
-pub(crate) mod token;
-
-use crate::auth::token::*;
-use crate::storage::DbPool;
 use axum::Router;
 
 /// Authentication routes
+use crate::storage::DbPool;
+
+// route prefix: `/auth/token/`
+// mod token;
+
+// use self::token::token_auth_router;
+
 pub fn auth_router(pool: DbPool) -> Router<DbPool> {
-    // The token auth router handles all token-related authentication routes
-    Router::new()
-        .nest("/token", token_auth_router(pool.clone()))
-        .with_state(pool)
+    Router::new().with_state(pool)
+    // .nest("/token", token_auth_router())
 }
@@ -1,66 +0,0 @@
-use crate::auth::token::{TokenDTO, get_roles_from_token, get_token_from_key};
-use crate::storage::DbPool;
-use axum::body::Body;
-use axum::extract::FromRequestParts;
-use axum::http::request::Parts;
-use axum::http::{HeaderMap, Request, StatusCode, header};
-use std::fmt::Debug;
-
-// Currently unused but for usage in the future
-#[allow(unused)]
-/// AuthInfo is an extractor that retrieves authentication information from the request.
-#[derive(Debug)]
-pub struct AuthInfo {
-    token: TokenDTO,
-    roles: Vec<String>,
-}
-
-impl FromRequestParts<DbPool> for AuthInfo {
-    type Rejection = StatusCode;
-
-    /// Extracts authentication information from the request parts.
-    async fn from_request_parts(
-        parts: &mut Parts,
-        state: &DbPool,
-    ) -> Result<Self, Self::Rejection> {
-        let header = &parts.headers;
-
-        inspect_with_header(state, header).await
-    }
-}
-
-// Currently unused but for usage in the future
-#[allow(unused)]
-/// Extracts the headers from request and returns the result from inspect_with_header function.
-pub async fn inspect_req(state: &DbPool, req: &Request<Body>) -> Result<AuthInfo, StatusCode> {
-    let header = req.headers();
-    inspect_with_header(state, header).await
-}
-
-/// Inspects the request headers and extracts authentication information.
-/// Returns an `AuthInfo` struct containing the token and roles if successful.
-/// If the authorization header is missing or invalid, it returns a `StatusCode::UNAUTHORIZED`.
-///
-/// This function is intentionally separated so it can be used from
-/// within the Axum extractor as well as in other functions.
-pub async fn inspect_with_header(
-    state: &DbPool,
-    header: &HeaderMap,
-) -> Result<AuthInfo, StatusCode> {
-    let auth_header = header
-        .get(header::AUTHORIZATION)
-        .and_then(|value| value.to_str().ok());
-
-    match auth_header {
-        Some(auth_value) => {
-            let token = get_token_from_key(auth_value, state).await;
-            if token.is_err() {
-                return Err(StatusCode::UNAUTHORIZED);
-            }
-            let token = token.unwrap();
-            let roles = get_roles_from_token(&token, state).await;
-            Ok(AuthInfo { token, roles })
-        }
-        None => Err(StatusCode::UNAUTHORIZED),
-    }
-}
@@ -1,286 +1,45 @@
-// There are some placeholder functions, that will have to be implemented before the first release.
-// They are marked with `todo!()` to indicate that they need to be implemented.
-// We want to keep these functions in the codebase.
-// That is why we choose to suppress unused warnings for now.
-// TODO
-#![allow(unused)]
+use axum::Router;
 
 use crate::storage::DbPool;
-use axum::extract::State;
-use axum::http::StatusCode;
-use axum::response::{IntoResponse, Response};
-use axum::routing::post;
-use axum::{Json, Router};
-use log::error;
-use rand::{Rng, distributions::Alphanumeric};
-use serde::{Deserialize, Serialize};
-use sqlx::Error;
-use uuid::Uuid;
-
-#[derive(Debug, Serialize)]
-pub struct IdentityDTO {
-    id: String,
-    name: String,
-}
-
-#[derive(Debug)]
-pub struct TokenDTO {
-    key: String,
-    id: String,
-    identity_id: Option<String>,
-    parent_id: Option<String>,
-    expiry: Option<i64>,
-}
-
-#[derive(Debug)]
-pub struct TokenRoleMembershipDTO {
-    role_name: String,
-    token_id: String,
-}
-
-/// Represents a request body for the `/auth/token/lookup` endpoint.
-#[derive(Deserialize)]
-struct RequestBodyPostLookup {
-    token: String,
-}
-
-/// Represents the response body for the `/auth/token/lookup` endpoint.
-#[derive(Serialize)]
-struct TokenLookupResponse {
-    id: String,
-    type_name: String,
-    roles: Vec<String>,
-}
-
-/// Represents an error response for the API.
-#[derive(Serialize)]
-struct ErrorResponse {
-    error: String,
-}
-
-/// Generates a random string of the specified length using alphanumeric characters.
-// TODO: Make string generation secure
-fn get_random_string(len: usize) -> String {
-    rand::thread_rng()
-        .sample_iter(&Alphanumeric)
-        .take(len)
-        .map(char::from)
-        .collect()
-}
-
-/// Creates a root token if none exists in the database.
-/// Returns true if a new root token was created, false if one already exists.
-pub async fn create_root_token_if_none_exist(pool: &DbPool) -> bool {
-    // Check if a root token already exists
-    let exists = sqlx::query!(
-        r#"SELECT service_token.* FROM service_token, service_token_role_membership
-        WHERE service_token.id = service_token_role_membership.token_id AND
-        service_token_role_membership.role_name = 'root'
-        LIMIT 1"#
-    )
-    .fetch_one(pool)
-    .await
-    .is_ok();
-    if exists {
-        return false;
-    }
-    // If no root token exists, create one
-    let result = create_root_token(pool).await;
-    if result.is_err() {
-        let error = result.err().unwrap();
-        // Log the error and panic
-        error!("create_root_token failed: {error:?}");
-        panic!("create_root_token failed: {error:?}");
-    }
-    // If successful, print the root token. This will only happen once.
-    println!("\n\nYour root token is: {}", result.unwrap());
-    println!("It will only be displayed once!\n\n");
-    true
-}
-
-/// Creates a root token in the database.
-async fn create_root_token(pool: &DbPool) -> Result<String, Error> {
-    let id = Uuid::new_v4().to_string();
-    let key = "s.".to_string() + &get_random_string(24);
-    // Insert the root token into the database
-    let result = sqlx::query!(r#"
-        INSERT INTO service_token (id, key) VALUES ($1, $2);
-        INSERT INTO service_token_role_membership (token_id, role_name) VALUES ($3, 'root');
-        "#, id, key, id).execute(pool).await;
-    // If the insert was successful, return the key
-    if result.is_ok() {
-        return Ok(key);
-    }
-    // Else, return the error
-    Err(result.unwrap_err())
-}
-
-/// Gets the current time in seconds since unix epoch
-fn get_time_as_int() -> i64 {
-    std::time::SystemTime::now()
-        .duration_since(std::time::UNIX_EPOCH)
-        .unwrap()
-        .as_secs() as i64
-}
-
-/// Gets the type of token. (The first character of the key always specifies the type)
-fn get_token_type(token: &TokenDTO) -> Result<String, &str> {
-    Ok(match token.key.clone().chars().next().unwrap_or('?') {
-        's' => "service",
-        'b' => "batch",
-        'r' => "recovery",
-        _ => {
-            error!("Unsupported token type");
-            return Err("Unsupported token type");
-        }
-    }
-    .to_string())
-}
-
-/// Retrieves a token from the database using its key.
-/// If the token is found and not expired, it returns the token.
-/// Else, it returns an error.
-pub async fn get_token_from_key(token_key: &str, pool: &DbPool) -> Result<TokenDTO, Error> {
-    let time = get_time_as_int();
-    sqlx::query_as!(
-        TokenDTO,
-        r#"SELECT * FROM 'service_token' WHERE key = $1 AND (expiry IS NULL OR expiry > $2) LIMIT 1"#,
-        token_key, time).fetch_one(pool).await
-}
-
-/// Retrieves the roles associated with a given token from the database.
-/// If the token does not exist, it returns an empty vector.
-pub async fn get_roles_from_token(token: &TokenDTO, pool: &DbPool) -> Vec<String> {
-    let result = sqlx::query_as!(
-        TokenRoleMembershipDTO,
-        r#"SELECT * FROM 'service_token_role_membership' WHERE token_id = $1"#,
-        token.id
-    )
-    .fetch_all(pool)
-    .await;
-    result
-        .unwrap_or(Vec::new())
-        .iter()
-        .map(|r| r.role_name.to_string())
-        .collect()
-}
 
 /// Return a router, that may be used to route traffic to the corresponding handlers
-pub fn token_auth_router(pool: DbPool) -> Router<DbPool> {
+pub fn token_auth_router() -> Router {
     Router::new()
         .route("/lookup", post(post_lookup))
-        .with_state(pool)
 }
 
-/// Handles the `/auth/token/lookup` endpoint.
-/// Retrieves the token and its associated roles from the database using the provided token key.
-/// The output format does not yet match the openBao specification and is for testing only!
-async fn post_lookup(
-    State(pool): State<DbPool>,
-    Json(body): Json<RequestBodyPostLookup>,
-) -> Response {
-    let token_str = body.token;
-    // Validate the token string
-    match get_token_from_key(&token_str, &pool).await {
-        // If the token is found, retrieve its type and roles
-        Ok(token) => {
-            let type_name = get_token_type(&token).unwrap_or_else(|_| String::from("Unknown"));
-            let roles = get_roles_from_token(&token, &pool).await;
-            let resp = TokenLookupResponse {
-                id: token.id,
-                type_name,
-                roles,
-            };
-            // Return the token information as a JSON response
-            (StatusCode::OK, axum::Json(resp)).into_response()
-        }
-        // If the token is not found, return a 404 Not Found error
-        Err(e) => {
-            error!("Failed to retrieve token: {e:?}");
-            let err = ErrorResponse {
-                error: "Failed to retrieve token".to_string(),
-            };
-            (StatusCode::NOT_FOUND, axum::Json(err)).into_response()
-        }
-    }
-}
+async fn get_accessors() {}
 
 //
 // The following functions are placeholders for the various token-related operations.
 //
+async fn post_create() {}
+
-async fn get_accessors() -> &'static str {
-    todo!("not implemented")
-}
+async fn post_create_orphan() {}
+
-async fn post_create() -> &'static str {
-    todo!("not implemented")
-}
+async fn post_create_role() {}
+
-async fn post_create_orphan() -> &'static str {
-    todo!("not implemented")
-}
+async fn get_lookup() {}
+
-async fn post_create_role() -> &'static str {
-    todo!("not implemented")
-}
+async fn post_lookup() {}
+
-async fn get_lookup() -> &'static str {
-    todo!("not implemented")
-}
+async fn get_lookup_self() {}
+
-async fn get_lookup_self() -> &'static str {
-    todo!("not implemented")
-}
+async fn post_lookup_self() {}
+
-async fn post_lookup_self() -> &'static str {
-    todo!("not implemented")
-}
+async fn post_renew() {}
+
-async fn post_renew() -> &'static str {
-    todo!("not implemented")
-}
+async fn post_renew_accessor() {}
+
-async fn post_renew_accessor() -> &'static str {
-    todo!("not implemented")
-}
+async fn post_renew_self() {}
+
-async fn post_renew_self() -> &'static str {
-    todo!("not implemented")
-}
+async fn post_revoke() {}
+
-async fn post_revoke() -> &'static str {
-    todo!("not implemented")
-}
+async fn post_revoke_accessor() {}
+
-async fn post_revoke_accessor() -> &'static str {
-    todo!("not implemented")
-}
+async fn post_revoke_orphan() {}
+
-async fn post_revoke_orphan() -> &'static str {
-    todo!("not implemented")
-}
+async fn post_revoke_self() {}
+
-async fn post_revoke_self() -> &'static str {
-    todo!("not implemented")
-}
+async fn get_roles() {}
+
-async fn get_roles() -> &'static str {
-    todo!("not implemented")
-}
+async fn get_role_by_name() {}
+
-async fn get_role_by_name() -> &'static str {
-    todo!("not implemented")
-}
+async fn post_role_by_name() {}
+
-async fn post_role_by_name() -> &'static str {
-    todo!("not implemented")
-}
+async fn delete_role_by_name() {}
+
-async fn delete_role_by_name() -> &'static str {
-    todo!("not implemented")
-}
-
-async fn post_tidy() -> &'static str {
-    todo!("not implemented")
-}
+async fn post_tidy() {}
@@ -13,11 +13,106 @@ pub struct HttpError {
 }
 
 impl HttpError {
-    pub fn multiple_errors(status_code: StatusCode, errors: Vec<String>) -> Response<Body> {
+    pub fn new(status_code: StatusCode, errors: Vec<String>) -> Response<Body> {
         (status_code, Json(HttpError { errors })).into_response()
     }
 
     pub fn simple(status_code: StatusCode, error: impl ToString) -> Response<Body> {
-        HttpError::multiple_errors(status_code, vec![error.to_string(); 1])
+        HttpError::new(status_code, vec![error.to_string(); 1])
    }
 }
+
+/// Custom serialization function for `secret_data`
+pub fn serialize_reject_none<S>(value: &Option<String>, serializer: S) -> Result<S::Ok, S::Error>
+where
+    S: serde::Serializer,
+{
+    match value {
+        Some(data) => serializer.serialize_str(data),
+        None => Err(serde::ser::Error::custom(
+            "`secret_data` must not be None during serialization!",
+        )),
+    }
+}
+
+/// Parses duration strings to seconds.
+/// Returns `None` on error or empty string.
+/// Example: `4h3m1s`
+pub fn parse_duration_str(input: &String) -> Option<u32> {
+    if input.is_empty() {
+        return None;
+    }
+    input
+        .split_inclusive(char::is_alphabetic)
+        .try_fold(0u32, |acc, chunk| {
+            let (value, unit) = chunk.split_at(chunk.len() - 1);
+            let value = value.parse::<u32>().ok()?;
+            match unit {
+                "h" => acc.checked_add(value.checked_mul(3600)?),
+                "m" => acc.checked_add(value.checked_mul(60)?),
+                "s" => acc.checked_add(value),
+                _ => None,
+            }
+        })
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_parse_duration_str_valid_inputs() {
+        let cases = vec![
+            ("0s", 0),
+            ("4h", 14400),
+            ("3m", 180),
+            ("1s", 1),
+            ("4h3m1s", 14581),
+            ("2h30m", 9000),
+            ("2h30s", 7230),
+            ("2m30s", 150),
+        ];
+
+        for (str, res) in cases {
+            assert_eq!(parse_duration_str(&str.to_string()), Some(res))
+        }
+    }
+
+    #[test]
+    fn test_parse_duration_str_invalid_inputs() {
+        let cases = vec!["", "-5s", "4x", "4h3x1s", "4h-3m1s", "4h3m1"];
+
+        for str in cases {
+            assert_eq!(parse_duration_str(&str.to_string()), None);
+        }
+    }
+
+    #[test]
+    fn test_parse_duration_str_edge_cases() {
+        let cases = vec![
+            ("0h0m0s", Some(0)),
+            ("0h", Some(0)),
+            ("0m", Some(0)),
+            ("0s", Some(0)),
+            ("1h0m0s", Some(3600)),
+            ("1m1000000s", Some(60 + 1000000)),
+        ];
+
+        for (str, res) in cases {
+            assert_eq!(parse_duration_str(&str.to_string()), res)
+        }
+    }
+
+    #[test]
+    fn test_parse_duration_str_overflow() {
+        let cases = vec![
+            "100000000h".to_string(), //
+            "100000000m".to_string(), //
+            format!("{}s", u32::MAX as u64 + 1),
+        ];
+
+        for str in cases {
+            assert_eq!(parse_duration_str(&str), None, "Failed for {str}");
+        }
+    }
+}
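A note on the new `serialize_reject_none` helper above: serde only invokes a custom serializer where a field opts in via `#[serde(serialize_with = ...)]`. A minimal sketch of how it might be attached, assuming a response struct with an optional `secret_data` field (the struct shown is illustrative, not the crate's actual definition):

```rust
use serde::Serialize;

// Illustrative struct; the crate's real KvSecretData lives in src/engines/kv/structs.rs.
#[derive(Serialize)]
struct SecretPayload {
    // serde routes this field through serialize_reject_none, so serializing
    // a payload with `secret_data: None` returns an error instead of emitting null.
    #[serde(serialize_with = "serialize_reject_none")]
    secret_data: Option<String>,
}
```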
@@ -1,11 +1,11 @@
 pub mod kv;
 
 use axum::{
-    Extension, Router,
     body::Body,
     extract::{Request, State},
     http::{StatusCode, Uri},
     response::{IntoResponse, Response},
+    Extension, Router,
 };
 use log::*;
 use tower::Service;
@@ -14,7 +14,7 @@ use crate::{common::HttpError, storage::DbPool};
 
 #[derive(Clone)]
 /// State to be used to store the database pool
-/// and the routers for each engine.
+/// and the routers for each engine
 struct EngineMapperState {
     pool: DbPool,
     kv_v2: Router,
@@ -23,8 +23,7 @@ struct EngineMapperState {
 #[derive(Clone)]
 struct EnginePath(String);
 
-/// Secret engine router.
-/// Dynamically puts requests into routers depending on database content.
+/// Secret engine router
 pub fn secrets_router(pool: DbPool) -> Router<DbPool> {
     // State containing the pool and engine routers
     let state = EngineMapperState {
@@ -43,7 +42,7 @@ async fn engine_handler(
     req: Request,
 ) -> Response<Body> {
     if let Some((mount_path, engine_type)) = map_mount_points(req.uri(), &engines.pool).await {
-        info!("Found mount point {mount_path} of type {engine_type}");
+        info!("Found mount point {} of type {}", mount_path, engine_type);
         // Match the engine type to the appropriate router
         match engine_type.as_str() {
             "kv_v2" => call_router(engines.kv_v2, mount_path, req).await,
@@ -73,7 +72,7 @@ async fn call_router(engine: Router, mount_path: String, mut req: Request) -> Re
 /// Occurs when the mount path is found in the database
 /// but the registered is unknown
 fn unknown_engine(engine_type: String) -> impl IntoResponse {
-    error!("Engine type {engine_type} not implemented");
+    error!("Engine type {} not implemented", engine_type);
     HttpError::simple(
         StatusCode::INTERNAL_SERVER_ERROR,
         format!("Engine type {engine_type} not implemented"),
@@ -5,8 +5,11 @@ mod structs;
 // #[cfg(test)]
 // mod tests;
 
-use crate::storage::DbPool;
-use axum::{Router, routing::*};
+use crate::{common::parse_duration_str, storage::DbPool};
+use axum::{Extension, Json, Router, extract::State, response::NoContent, routing::*};
+use serde::{Deserialize, Serialize};
+
+use super::EnginePath;
 
 pub fn kv_router(pool: DbPool) -> Router {
     Router::new()
@@ -25,7 +28,7 @@ pub fn kv_router(pool: DbPool) -> Router {
         .route("/metadata/{*path}", post(meta::post_meta))
         .route("/metadata/{*path}", delete(meta::delete_meta))
         .route("/subkeys/{*path}", get(get_subkeys))
-        .route("/undelete/{*path}", post(post_undelete))
+        // .route("/undelete/{*path}", post(data::post_undelete)) // TODO
         .with_state(pool)
 }
 
@@ -33,14 +36,51 @@ async fn get_config() -> &'static str {
     todo!("not implemented")
 }
 
-async fn post_config() -> &'static str {
-    todo!("not implemented")
+#[derive(Debug, Deserialize, Serialize)]
+struct Config {
+    /// A value `0` shall be be treated as 10
+    /// TODO: Not implemented
+    pub max_versions: Option<u32>,
+    #[serde(default)]
+    // TODO: Not implemented
+    pub cas_required: bool,
+    /// Max age of a secret version
+    /// Example: `"3h25m19s"`
+    /// `0s` disable automatic deletion
+    /// TODO: Not implemented
+    pub delete_version_after: Option<String>,
+}
+
+#[derive(Debug, Deserialize, Serialize)]
+struct ConfigRes {
+    pub data: Config,
+}
+
+async fn post_config(
+    State(pool): State<DbPool>,
+    Extension(EnginePath(engine_path)): Extension<EnginePath>,
+    Json(config): Json<Config>,
+) -> NoContent {
+    let max_age_secs: Option<u32> = config
+        .delete_version_after
+        .map(|v| parse_duration_str(&v).expect("Failed to parse duration string"));
+
+    // TODO: This
+    let a = sqlx::query!(
+        "UPDATE kv2_engine_cfg SET (max_versions, max_age_secs, cas_required) = ($2, $3, $4)
+        WHERE engine_path = $1",
+        engine_path,
+        config.max_versions,
+        max_age_secs,
+        config.cas_required
+    )
+    .execute(&pool)
+    .await
+    .unwrap();
+
+    NoContent
 }
 
 async fn get_subkeys() -> &'static str {
     todo!("not implemented")
 }
 
 async fn post_undelete() -> &'static str {
     todo!("not implemented")
 }
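For orientation, the `post_config` handler above deserializes its request body into the new `Config` struct. A body it would accept might look like the following sketch (illustrative values; field names follow serde's default mapping):

```rust
// Illustrative request body for the kv2 config endpoint, matching Config.
let body = serde_json::json!({
    "max_versions": 10,
    "cas_required": true,               // may be omitted thanks to #[serde(default)]
    "delete_version_after": "3h25m19s"  // converted to seconds by parse_duration_str
});
```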
@@ -1,19 +1,8 @@
-// There are some placeholder functions, that will have to be implemented before the first release.
-// They are marked with `todo!()` to indicate that they need to be implemented.
-// We want to keep these functions in the codebase.
-// That is why we choose to suppress unused warnings for now.
-// TODO
-#![allow(unused)]
-
 use super::structs::KvV2WriteRequest;
 use crate::{
-    DbPool,
-    common::HttpError,
-    engines::{
-        EnginePath,
-        kv::structs::{KvSecretData, KvSecretRes, KvV2WriteResponse, Wrapper},
-    },
-    storage::sealing::Secret,
+    common::HttpError, engines::{
+        kv::structs::{KvSecretData, KvSecretRes, KvV2WriteResponse, Wrapper}, EnginePath
+    }, signing::Verifiable, storage::{sealing::Secret, SecretDataDTO}, DbPool
 };
 use axum::{
     Extension, Json,
@@ -22,7 +11,7 @@ use axum::{
     response::{IntoResponse, NoContent, Response},
 };
 use log::{debug, error, info, warn};
-use serde::Deserialize;
+use serde::{Deserialize, Serialize};
 use time::{OffsetDateTime, UtcDateTime};
 
 #[derive(Deserialize)]
@@ -34,6 +23,9 @@ pub struct GetDataQuery {
 }
 
 /// Unluckily needed as `sqlx::query_as!()` does not support FromRow derivations
+#[rvault_server::signed_dbo]
+#[derive(Serialize, Deserialize)]
+#[deprecated("Use DTO instead")]
 struct SecretDataInternal {
     pub created_time: OffsetDateTime,
     pub deletion_time: Option<OffsetDateTime>,
@@ -44,7 +36,7 @@ struct SecretDataInternal {
     pub encrypted_data: Vec<u8>,
 }
 
-impl SecretDataInternal {
+impl SecretDataDTO {
     pub async fn into_external(self) -> KvSecretData {
         let secret = Secret::new(self.encrypted_data, self.nonce).decrypt().await;
         KvSecretData {
@@ -68,16 +60,16 @@ pub async fn get_data(
     let res = if params.version != 0 {
         // With specific version
         sqlx::query_as!(
-            SecretDataInternal,
-            r#"SELECT nonce, encrypted_data, created_time, deletion_time, version_number, secret_path
+            SecretDataDTO,
+            r#"SELECT *
             FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL
             AND version_number = $3"#,
             engine_path, path, params.version).fetch_one(&pool).await
     } else {
         // Without specific version
         sqlx::query_as!(
-            SecretDataInternal,
-            r#"SELECT nonce, encrypted_data, created_time, deletion_time, version_number, secret_path
+            SecretDataDTO,
+            r#"SELECT *
             FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL
            ORDER BY version_number DESC LIMIT 1"#,
            engine_path, path).fetch_one(&pool).await
@@ -132,10 +124,7 @@ pub async fn post_data(
 
     let content = serde_json::to_string(&secret.data).unwrap();
 
-    let Secret {
-        nonce,
-        protected_data,
-    } = Secret::encrypt(&content).await.unwrap();
+    let Secret { nonce, protected_data } = Secret::encrypt(&content).await.unwrap();
     let nonce = nonce.as_slice();
 
     let mut tx = pool.begin().await.unwrap();
@@ -146,6 +135,17 @@ pub async fn post_data(
         ON CONFLICT(engine_path, secret_path) DO NOTHING;
         ", engine_path, kv_path, ts).execute(&mut *tx).await.unwrap();
 
+    let secret_version = SecretDataDTO {
+        signature: Vec::new(),
+        created_time: todo!(),
+        deletion_time: todo!(),
+        version_number: todo!(),
+        secret_path: todo!(),
+        engine_path,
+        nonce: todo!(),
+        encrypted_data: todo!(),
+    };
+    let signature = secret_version.sign().await.to_vec();
     let res_r = sqlx::query_file!(
         "src/engines/kv/post_secret.sql",
         engine_path,
@@ -154,6 +154,7 @@ pub async fn post_data(
         protected_data,
         ts,
         secret.version,
+        signature,
     )
     .fetch_one(&mut *tx)
     .await
@@ -243,11 +244,48 @@ pub async fn delete_data(
     Ok(NoContent.into_response())
 }
 
+pub struct UndeleteReq {
+    versions: Vec<i64>,
+}
+
+pub async fn post_undelete(
+    State(pool): State<DbPool>,
+    Path(path): Path<String>,
+    Extension(EnginePath(engine_path)): Extension<EnginePath>,
+    Json(UndeleteReq { versions }): Json<UndeleteReq>,
+) -> Response {
+    info!("Undeleting versions {versions:?} of {path} from {engine_path}");
+
+    let mut tx = pool.begin().await.unwrap();
+
+    for version in versions {
+        sqlx::query!(
+            r#"
+            UPDATE kv2_secret_version
+            SET deletion_time = NULL
+            WHERE engine_path = $1 AND secret_path = $2
+            AND version_number = $3
+            "#,
+            engine_path,
+            path,
+            version,
+        )
+        .execute(&mut *tx)
+        .await;
+    }
+
+    tx.commit().await.unwrap();
+
+    NoContent.into_response()
+}
+
+
 pub async fn patch_data(
     State(pool): State<DbPool>,
     Path(kv_path): Path<String>,
     Extension(EnginePath(engine_path)): Extension<EnginePath>,
     Json(secret): Json<KvV2WriteRequest>,
-) -> &'static str {
-    todo!("not implemented")
+) -> Result<Response, ()> {
+    // TODO: implement only application/merge-patch+json
+    todo!("Not implemented")
 }
@@ -1,10 +1,3 @@
-// There are some placeholder functions, that will have to be implemented before the first release.
-// They are marked with `todo!()` to indicate that they need to be implemented.
-// We want to keep these functions in the codebase.
-// That is why we choose to suppress unused warnings for now.
-// TODO
-#![allow(unused)]
-
 use crate::storage::DbPool;
 use axum::extract::{Path, State};
 
@@ -1,10 +1,10 @@
 
 WITH latest_version AS (
-    SELECT MAX(version_number) AS max_version
+    SELECT MAX(version_number) AS max_version, signature
     FROM kv2_secret_version
     WHERE engine_path = $1 AND secret_path = $2 -- engine_path AND secret_path
 )
-INSERT INTO kv2_secret_version (engine_path, secret_path, nonce, encrypted_data, created_time, version_number)
+INSERT INTO kv2_secret_version (engine_path, secret_path, nonce, encrypted_data, created_time, version_number, signature)
 VALUES (
     $1, -- engine_path
     $2, -- secret_path
@@ -14,6 +14,7 @@ VALUES (
     CASE -- Use provided version if given
         WHEN $6 IS NOT NULL THEN $6 -- version_number (optional)
         ELSE COALESCE((SELECT max_version FROM latest_version) + 1, 1) -- otherwise 1
-    END -- version_number logic
+    END, -- version_number logic
+    $7 -- signature
 )
-RETURNING version_number;
+RETURNING version_number, signature;
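The new `$7` placeholder pairs with the extra `signature` argument that `post_data` now passes to `sqlx::query_file!` (see the data diff above). A sketch of the call shape; the two middle bindings are inferred from the column list, which is an assumption since the hunk elides them:

```rust
// Sketch: binds $1..$7 of src/engines/kv/post_secret.sql in order.
let res_r = sqlx::query_file!(
    "src/engines/kv/post_secret.sql",
    engine_path,    // $1
    kv_path,        // $2 secret_path
    nonce,          // $3 (inferred)
    protected_data, // $4 encrypted_data (inferred)
    ts,             // $5 created_time
    secret.version, // $6 optional version_number
    signature,      // $7 signature
)
.fetch_one(&mut *tx)
.await;
```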
@@ -1,9 +1,3 @@
-// There are some placeholder functions, that will have to be implemented before the first release.
-// They are marked with `todo!()` to indicate that they need to be implemented.
-// We want to keep these functions in the codebase.
-// That is why we choose to suppress unused warnings for now.
-#![allow(unused)]
-
 use serde::{Deserialize, Serialize};
 use std::{collections::HashMap, vec};
 use time::{OffsetDateTime, UtcDateTime, serde::rfc3339};
src/engines/kv/tests.rs (new file, 66 lines)
@@ -0,0 +1,66 @@
+use std::collections::HashMap;
+
+use chrono::Utc;
+use tests::{
+    logic::patch_metadata,
+    structs::{SecretMeta, VersionMeta},
+};
+
+use super::*;
+
+#[test]
+#[cfg(target_feature = "_disabled")]
+fn print_serialized_test() {
+    let temp_secret = TempSecret {
+        content: String::from("Hallo"),
+        version: 12,
+    };
+    let serialized = serialize_secret_json(&temp_secret);
+    println!("string serialized: {:?}", serialized);
+    let deserialized = deserialize_secret_struct(&serialized.unwrap());
+    println!(
+        "Struct field from deserialized: {}",
+        deserialized.unwrap().content
+    )
+}
+#[test]
+#[cfg(target_feature = "_disabled")]
+fn test_patching() {
+    // TODO add more assertions
+    let mut base = create_mock_meta();
+    println!("OLD metadata: {:?}", base);
+    let overwrite: SecretMeta = SecretMeta {
+        max_versions: 10,
+        versions: vec![VersionMeta {
+            created_time: Utc::now(),
+            deletion_time: Some(Utc::now()),
+            destroyed: true,
+        }],
+        cas_required: true,
+        delete_version_after: "10m".to_string(),
+        current_version: 4,
+        oldest_version: 2,
+        updated_time: Utc::now(),
+        created_time: Utc::now(),
+        custom_metadata: Some(HashMap::new()),
+    };
+    let mut patched: Option<SecretMeta> = None; // Laurenz here
+    match patch_metadata(&mut base, &overwrite) {
+        Ok(meta) => {
+            println!("NEW metadata: {:?}", meta);
+            println!("patched successfully");
+            patched = Some(meta);
+        }
+        Err(e) => {
+            log::error!("error patching metadata: {}", e);
+            panic!("Patching failed");
+        }
+    }
+
+    if let Some(patched_meta) = patched {
+        assert!(patched_meta.current_version == 4);
+        assert!(patched_meta.versions[0].destroyed == true);
+    } else {
+        panic!("patched was not initialized");
+    }
+}
src/macros.rs (new file, 215 lines)
@@ -0,0 +1,215 @@
+extern crate proc_macro;
+use proc_macro::TokenStream;
+use quote::quote;
+use sqlx::query;
+use syn::{parse_macro_input, token::Token, Fields, ItemStruct, LitStr};
+
+/// Database Objects which are verifiable for integrity.\
+/// Extends struct with a `signature` attribute/field for the signature of the hash,
+/// which is skipped on serialization/deserialization.
+///
+/// After obtaining a verifiable struct, you may want to verify.
+/// After modifying, you may want to re-sign the data before updating the database entry,
+/// otherwise the saved data would violate integrity.
+///
+/// Implements the [crate::storage::signing::Verifiable] trait for usage.\
+/// Implies [serde::Serialize] due to hashing.\
+/// Only named structs are supported.
+#[proc_macro_attribute]
+pub fn signed_dbo(_attr: TokenStream, item: TokenStream) -> TokenStream {
+    let input = parse_macro_input!(item as ItemStruct);
+
+    let vis = &input.vis;
+    let struct_name = &input.ident;
+    let fields = match &input.fields {
+        Fields::Named(f) => &f.named,
+        _ => panic!("Only named structs are supported"),
+    };
+
+    let mut new_fields = quote! {
+        #[serde(skip)]
+        pub signature: Vec<u8>,
+    };
+    for field in fields {
+        new_fields.extend(quote! { #field, });
+    }
+    // expand_input
+    // let a = sqlx::query(r"SELECT name
+    //     FROM pragma_table_info('kv2_metadata')
+    //     WHERE pk > 0").fetch_one(&input.);
+    // print!("aaa {a}");
+
+    let expanded = quote! {
+        #[derive(serde::Serialize)]
+        #vis struct #struct_name {
+            #new_fields
+        }
+
+        impl crate::storage::signing::Verifiable for #struct_name {
+            async fn sign(&self) -> ed25519::Signature {
+                crate::storage::signing::sign(self).await
+            }
+
+            async fn verify<P: serde::Serialize>(
+                &self,
+                signature: &ed25519::Signature,
+            ) -> Result<(), ed25519::Error> {
+                crate::storage::signing::verify(self, signature).await
+            }
+        }
+
+        impl #struct_name {
+            async fn fetch_one() {
+                // sqlx::query_as!(#struct_name, r"
+                //     SELECT * FROM $1 WHERE engine_path = $2, path = $3
+                // ")
+            }
+
+            async fn insert_one(&mut self) {
+                // self.signature = await self.sign();
+                // sqlx::query!(r"
+                //     INSERT INTO $1
+                // ")
+            }
+        }
+    };
+
+    TokenStream::from(expanded)
+}
+
+// #[proc_macro]
+// pub fn verifying_query(input: TokenStream) -> TokenStream {
+//     struct VerifyingQueryInput {
+//         target_type: syn::Type,
+//         table_name: syn::LitStr,
+//     }
+
+//     impl syn::parse::Parse for VerifyingQueryInput {
+//         fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
+//             let target_type: syn::Type = input.parse()?;
+//             input.parse::<syn::Token![,]>()?;
+//             let table_name: syn::LitStr = input.parse()?;
+//             Ok(VerifyingQueryInput { target_type, table_name })
+//         }
+//     }
+
+//     let VerifyingQueryInput { target_type, table_name } = parse_macro_input!(input as VerifyingQueryInput);
+
+//     // Extract the type (e.g., `MyType`), separated by a comma
+//     let parsed_input: VerifyingQueryInput = syn::parse(input).expect("Failed to parse input");
+//     let target_type = parsed_input.target_type;
+//     let table = parsed_input.table_name.value();
+
+//     let sql = format!("SELECT * FROM {table}");
+//     let query = quote! {
+//         sqlx::query_as!(target_type, sql)
+//     };
+
+//     query.into()
+// }
+
+#[cfg(test)]
+#[deprecated(note = "doesnt work")]
+mod test_macro {
+    use super::*;
+    use quote::quote;
+    use syn::{ItemStruct, parse_quote};
+
+    pub struct TestStruct {
+        field1: String,
+        field2: i32,
+    }
+
+    #[test]
+    fn test_signed_dbo_macro() {
+        let input: TokenStream = quote! {}.into();
+
+        let output = signed_dbo(TokenStream::new(), input);
+
+        let expected: TokenStream = quote! {
+            pub struct TestStruct {
+                signature: Vec<u8>,
+                field1: String,
+                field2: i32,
+            }
+        }
+        .into();
+
+        assert_eq!(output.to_string(), expected.to_string());
+    }
+
+    #[test]
+    #[should_panic(expected = "Only named structs are supported")]
+    fn test_signed_dbo_macro_unnamed_struct() {
+        let input: TokenStream = quote! {
+            struct TestStruct(String, i32);
+        }
+        .into();
+
+        signed_dbo(TokenStream::new(), input);
+    }
+
+    // #[test]
+    // fn test_sqlx_select_macro() {
+    //     let input: TokenStream = quote! {
+    //         MyModel, "users", "WHERE id = ?", 42
+    //     }
+    //     .into();
+
+    //     let output = sqlx_select(input);
+
+    //     let expected: TokenStream = quote! {
+    //         sqlx::query_as!(
+    //             MyModel,
+    //             "SELECT id,name,email FROM users WHERE id = ?",
+    //             42
+    //         )
+    //     }
+    //     .into();
+
+    //     assert_eq!(output.to_string(), expected.to_string());
+    // }
+}
+
+// struct SelectInput {
+//     model: proc_macro::Ident,
+//     _comma1: syn::Token![,],
+//     table: syn::LitStr,
+//     _comma2: syn::Token![,],
+//     condition: syn::LitStr,
+//     _comma3: syn::Token![,],
+//     args: syn::punctuated::Punctuated<syn::Expr, syn::Token![,]>,
+// }
+
+// #[proc_macro]
+// pub fn sqlx_select(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+//     let SelectInput { model, table, condition, args, .. } = syn::parse_macro_input!(input as SelectInput);
+
+//     // Hardcoded columns - this would be read from metadata in a full implementation
+//     let columns = quote::quote! { id, name, email };
+
+//     let sql = format!(
+//         "SELECT {} FROM {} {}",
+//         columns.to_string().replace(' ', ""),
+//         table.value(),
+//         condition.value()
+//     );
+
+//     let genn = quote::quote! {
+//         sqlx::query_as!(
+//             #model,
+//             #sql,
+//             #args
+//         )
+//     };
+
+//     genn.into()
+// }
+
+// #[cfg(test)]
+// mod test_macro {
+//     #[test]
+//     fn test_aaaah() {
+//         select_all!("aaa", "bbb");
+//     }
+// }
src/main.rs (25 changes)
@@ -1,12 +1,5 @@
 #![forbid(unsafe_code)]
 
-// // There are some placeholder functions, that will have to be implemented before the first release.
-// // They are marked with `todo!()` to indicate that they need to be implemented.
-// // We want to keep these functions in the codebase.
-// // That is why we choose to suppress unused warnings for now.
-// #![allow(unused)]
-
-use crate::common::HttpError;
 use axum::{
     Router,
     extract::Request,
@@ -20,6 +13,8 @@ use std::{env, net::SocketAddr, str::FromStr};
 use storage::DbPool;
 use tokio::{net::TcpListener, signal};
 
+use crate::common::HttpError;
+
 mod auth;
 mod common;
 mod engines;
@@ -27,10 +22,15 @@ mod identity;
 mod storage;
 mod sys;
 
+pub use storage::signing;
+
 #[tokio::main]
 async fn main() {
+    // NOTE: Rethink choice of environment variables in regards to security in the future
     let _ = dotenvy::dotenv();
     // To be configured via environment variables
     // choose from (highest to lowest): error, warn, info, debug, trace, off
+    // env::set_var("RUST_LOG", "trace"); // TODO: Remove to respect user configuration
+    // env::set_var("DATABASE_URL", "sqlite:test.db"); // TODO: move to .env
     env_logger::init();
 
     // Listen on all IPv4 and IPv6 interfaces on port 8200 by default
@@ -56,8 +56,6 @@
         storage::sealing::init_default(&pool).await;
     }
 
-    auth::token::create_root_token_if_none_exist(&pool).await;
-
     warn!("Listening on {listen_addr}");
     // Start listening
     let listener = TcpListener::bind(listen_addr).await.unwrap();
@@ -67,15 +65,13 @@
         .unwrap();
 }
 
 /// Middleware setting unspecified `Content-Type`s to json since this is done by client libraries.
 /// Axum's [axum::extract::Json] rejects extraction attempts without json content type.
 async fn set_default_content_type_json(
     mut req: Request,
     next: Next,
 ) -> Result<impl IntoResponse, Response> {
     if req.headers().get("content-type").is_none() {
         let headers = req.headers_mut();
-
-        // debug!("Request header: \n{:?}", headers);
         headers.insert("content-type", "application/json".parse().unwrap());
     }
 
@@ -109,7 +105,6 @@ async fn shutdown_signal(pool: DbPool) {
 }
 
 /// Fallback route for unknown routes
-///
-/// Note: `/v1/*` is handled by [`engines::secrets_router`]
 async fn fallback_route_unknown(req: Request) -> Response {
     log::error!(
@@ -122,7 +117,7 @@
     HttpError::simple(StatusCode::NOT_FOUND, "Route not implemented")
 }
 
-/// Basic handler that responds with a static string
+/// basic handler that responds with a static string
 async fn root() -> &'static str {
     info!("Hello world");
     "Hello, World!"
@@ -1,20 +1,17 @@
 pub mod sealing;
+pub mod signing;
+mod dtos;
+
+pub use dtos::*;
 
 use std::{fs::File, path::Path};
 
 use log::*;
-use sqlx::{Pool, Sqlite, sqlite::SqlitePoolOptions};
+use sqlx::{sqlite::SqlitePoolOptions, Pool, Sqlite};
 
 pub(crate) type DbType = Sqlite;
 pub(crate) type DbPool = Pool<DbType>;
 
 /// Creates a SQLx SQLite database pool.
 /// If nonexistent, it creates a new SQLite file.
-///
-/// Note: rvault uses compile-time queries.
-/// Hence, during development a migrated SQLite file is required.
-/// Use `cargo sqlx database reset` if required.
-/// Otherwise, set the env var `SQLX_OFFLINE=true` during compilation (not helpful for development).
 pub async fn create_pool(db_url: String) -> DbPool {
     // Create SQLite database file if it does not exist
     if db_url.starts_with("sqlite:") && db_url != ("sqlite::memory:") {
src/storage/dtos.rs (new file, 14 lines)
@ -0,0 +1,14 @@
|
|||
use time::OffsetDateTime;
|
||||
|
||||
/// Unluckily needed as `sqlx::query_as!()` does not support FromRow derivations
|
||||
#[rvault_server::signed_dbo]
|
||||
pub struct SecretDataDTO {
|
||||
pub created_time: OffsetDateTime,
|
||||
pub deletion_time: Option<OffsetDateTime>,
|
||||
pub version_number: i64,
|
||||
pub secret_path: String,
|
||||
pub engine_path: String,
|
||||
|
||||
pub nonce: Vec<u8>,
|
||||
pub encrypted_data: Vec<u8>,
|
||||
}
|
||||
|
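(The `query_as!` constraint in the doc comment means the DTO's fields must line up with the selected columns by name. A hedged sketch of what a fetch could look like; the `secret_data` table and column set are assumptions based on the DTO, and, as the compile-time-query note above says, this only compiles against a migrated database or with offline query data:)

    use sqlx::{Pool, Sqlite};

    // Illustrative only: table and column names are guesses based on the DTO.
    async fn load_secret_versions(
        pool: &Pool<Sqlite>,
        path: &str,
    ) -> Result<Vec<SecretDataDTO>, sqlx::Error> {
        sqlx::query_as!(
            SecretDataDTO,
            "SELECT created_time, deletion_time, version_number, secret_path,
                    engine_path, nonce, encrypted_data
             FROM secret_data WHERE secret_path = $1",
            path
        )
        .fetch_all(pool)
        .await
    }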
@@ -212,6 +212,10 @@ impl Secret {

     /// Encrypt a secret
     ///
+    /// # Panics
+    ///
+    /// Panics if .
+    ///
     /// # Errors
     ///
     /// This function will return an error if the vault is uninitialized or an unknown error occurs.
@@ -317,11 +321,11 @@ pub async fn init_default(pool: &DbPool) {

         #[cfg(feature = "shamir")]
         {
-            shamir::init_shamir(pool, 2, 5).await
+            shamir::init_shamir(&pool, 2, 5).await
         }
     };

-    let success = prepare_unseal(pool).await;
+    let success = prepare_unseal(&pool).await;
     warn!("New sealing password generated: {user_key:?}");
     assert!(
         success,

@@ -15,7 +15,7 @@ use zeroize::ZeroizeOnDrop;

 use crate::DbPool;

-use super::{Sealing, UnsealResult, write_new_root_key};
+use super::{write_new_root_key, Sealing, UnsealResult};

 type P256Share = DefaultShare<IdentifierPrimeField<Scalar>, IdentifierPrimeField<Scalar>>;
@@ -29,8 +29,6 @@ struct ShamirPortion {
 }

 #[derive(PartialEq)]
 /// Container for multiple [ShamirPortion]s and the protected root key.
 /// Multiple instances could exist in the future for per-namespace encryption.
 pub struct ShamirBucket {
     portions: Vec<ShamirPortion>,
     protected_rk: Vec<u8>,
@@ -38,7 +36,7 @@ pub struct ShamirBucket {
 }

 impl Sealing for ShamirBucket {
     fn new(protected_rk: Vec<u8>, nonce: Vec<u8>) -> Self {
         Self {
             portions: Vec::with_capacity(2),
             protected_rk,
@@ -46,7 +44,7 @@ impl Sealing for ShamirBucket {
         }
     }

     async fn unseal(&mut self, key: String) -> UnsealResult {
         let key = match BASE64_STANDARD.decode(key) {
             Ok(v) => v,
             Err(e) => {
@@ -68,7 +66,7 @@ impl Sealing for ShamirBucket {
         }
         self.portions.push(key_portion);

-        let joined_keys = match join_keys(&self.portions) {
+        let abc = match join_keys(&self.portions) {
             Ok(v) => v,
             Err(e) => {
                 return match e {
@@ -86,7 +84,7 @@ impl Sealing for ShamirBucket {
         }
         .to_bytes();

-        let cipher = match Aes256GcmSiv::new_from_slice(&joined_keys) {
+        let cipher = match Aes256GcmSiv::new_from_slice(&abc) {
             Ok(v) => v,
             Err(e) => {
                 info!("Cipher could not be created from slice: {e}");
@@ -147,7 +145,7 @@ fn share_keys(
     limit: usize,
     root_key: &[u8],
 ) -> Vec<String> {
-    log::debug!("RK: {root_key:?}");
+    // log::debug!("RK: {root_key:?}");
     assert!(
         threshold <= limit,
         "Threshold cannot be higher than the number of shares (limit)"
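(To make the threshold/limit pair concrete: `init_shamir(pool, 2, 5)` earlier requests 5 shares, any 2 of which reconstruct the root key. A toy 2-of-5 run over the small prime field GF(257), for intuition only; the real code shares P-256 scalar-field elements via `DefaultShare`, not bytes mod 257, and every constant below is invented:)

    const P: i64 = 257;

    /// Evaluate a polynomial (coefficients in ascending order) at x, mod P.
    fn eval(poly: &[i64], x: i64) -> i64 {
        poly.iter().rev().fold(0, |acc, c| (acc * x + c) % P)
    }

    /// Modular inverse by brute force; fine for a toy field of 257 elements.
    fn inv(a: i64) -> i64 {
        let a = a.rem_euclid(P);
        (1..P).find(|&b| (a * b) % P == 1).unwrap()
    }

    fn main() {
        let secret = 123; // stand-in for one byte of the root key
        let poly = [secret, 166]; // degree 1 => threshold 2; 166 plays the random coefficient

        // limit = 5: the shares are the points (x, f(x)) for x = 1..=5
        let shares: Vec<(i64, i64)> = (1..=5).map(|x| (x, eval(&poly, x))).collect();

        // threshold = 2: any two shares recover f(0) by Lagrange interpolation
        let (x1, y1) = shares[1]; // (2, f(2))
        let (x2, y2) = shares[4]; // (5, f(5))
        let recovered =
            (y1 * x2 % P * inv(x2 - x1) + y2 * x1 % P * inv(x1 - x2)).rem_euclid(P);
        assert_eq!(recovered, secret);
    }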
@@ -6,7 +6,7 @@ use base64::{Engine, prelude::BASE64_STANDARD};

 use crate::DbPool;

-use super::{Sealing, UnsealResult, write_new_root_key};
+use super::{write_new_root_key, Sealing, UnsealResult};

 /// Pair of protected root key and nonce
 #[derive(PartialEq)]
@@ -26,7 +26,6 @@ impl Sealing for SimpleSealing {
     }
 }

 /// Initialize the vault with a simple password
 #[allow(unused)]
 pub async fn init_simple(pool: &DbPool) -> String {
     let root_key = Aes256GcmSiv::generate_key(&mut OsRng);

src/storage/signing.rs (new file, +181)
@@ -0,0 +1,181 @@
+use std::sync::LazyLock;
+
+use bincode::{config, serde::encode_to_vec};
+use ed25519::signature::{Signer, Verifier};
+use ed25519_dalek::{SigningKey, VerifyingKey};
+use serde::Serialize;
+use tokio::sync::RwLock;
+use zeroize::Zeroize;
+
+pub type SignatureBundle =
+    SignatureKeyBundle<ed25519_dalek::SigningKey, ed25519_dalek::VerifyingKey>;
+
+static SIGNATURE_BUNDLE: LazyLock<RwLock<SignatureBundle>> =
+    LazyLock::new(|| RwLock::new(SignatureKeyBundle::new()));
+
+pub trait Verifiable {
+    fn sign(&self) -> impl std::future::Future<Output = ed25519::Signature> + Send;
+    fn verify<P: Serialize>(
+        &self,
+        signature: &ed25519::Signature,
+    ) -> impl std::future::Future<Output = Result<(), ed25519::Error>> + Send;
+}
+
+#[derive(Zeroize)]
+struct SignatureKeyBundle<S, V>
+where
+    S: Signer<ed25519::Signature>,
+    V: Verifier<ed25519::Signature>,
+{
+    pub signing_key: S,
+    pub verifying_key: V,
+}
+
+impl<S, V> SignatureKeyBundle<S, V>
+where
+    S: Signer<ed25519::Signature>,
+    V: Verifier<ed25519::Signature>,
+{
+    /// Signs a serializable payload
+    pub fn sign<P: Serialize>(&self, payload: &P) -> ed25519::Signature {
+        self.sign_bytes(&SignatureKeyBundle::<S, V>::hash_struct(payload))
+    }
+
+    fn sign_bytes(&self, payload: &[u8]) -> ed25519::Signature {
+        // NOTE: use `try_sign` if you'd like to be able to handle
+        // errors from external signing services/devices (e.g. HSM/KMS)
+        // <https://docs.rs/signature/latest/signature/trait.Signer.html#tymethod.try_sign>
+        self.signing_key.sign(payload)
+    }
+
+    /// Verifies a serializable payload against a given signature.
+    pub fn verify<P: Serialize>(
+        &self,
+        payload: &P,
+        signature: &ed25519::Signature,
+    ) -> Result<(), ed25519::Error> {
+        self.verify_bytes(&SignatureKeyBundle::<S, V>::hash_struct(payload), signature)
+    }
+
+    fn verify_bytes(
+        &self,
+        payload: &[u8],
+        signature: &ed25519::Signature,
+    ) -> Result<(), ed25519::Error> {
+        self.verifying_key.verify(payload, signature)
+    }
+
+    /// Serializes and hashes payload.
+    /// Uses `bincode` for serialization and `blake3` for hashing.
+    fn hash_struct<P: Serialize>(payload: &P) -> [u8; blake3::OUT_LEN] {
+        let serialized_payload =
+            encode_to_vec(payload, config::standard()).expect("Failed to serialize payload");
+        let hash: blake3::Hash = blake3::hash(&serialized_payload);
+        let hash_bytes = hash.as_bytes();
+
+        *hash_bytes
+    }
+}
+
+impl SignatureKeyBundle<SigningKey, VerifyingKey> {
+    pub fn new() -> SignatureKeyBundle<SigningKey, VerifyingKey> {
+        let mut rng = aes_gcm_siv::aead::OsRng;
+        let signing_key = SigningKey::generate(&mut rng);
+        let verifying_key: VerifyingKey = signing_key.verifying_key();
+        Self {
+            signing_key,
+            verifying_key,
+        }
+    }
+}
+
+pub async fn sign<P: Serialize>(payload: &P) -> ed25519::Signature {
+    SIGNATURE_BUNDLE.read().await.sign(payload)
+}
+
+pub async fn verify<P: Serialize>(
+    payload: &P,
+    signature: &ed25519::Signature,
+) -> Result<(), ed25519::Error> {
+    SIGNATURE_BUNDLE.read().await.verify(payload, signature)
+}
+
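(Aside: the key bundle is process-global, so callers only see the two module functions above. A hedged usage sketch; the payload type, field names, and the module path in the import are invented for illustration:)

    use serde::Serialize;

    use rvault_server::storage::signing::{sign, verify}; // assumed module path

    #[derive(Serialize)]
    struct AuditRecord {
        actor: String,
        action: String,
    }

    #[tokio::main]
    async fn main() {
        let record = AuditRecord {
            actor: "root".into(),
            action: "unseal".into(),
        };

        let sig = sign(&record).await;
        assert!(verify(&record, &sig).await.is_ok());

        // Any change to the serialized bytes invalidates the signature:
        // bincode(payload) -> blake3 -> ed25519 is deterministic per payload.
        let tampered = AuditRecord {
            actor: "root".into(),
            action: "seal".into(),
        };
        assert!(verify(&tampered, &sig).await.is_err());
    }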
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[derive(Serialize)]
+    struct TestPayload {
+        data: String,
+    }
+
+    #[derive(Serialize)]
+    struct TestPayloadEvolution {
+        data: String,
+        new_prop: Option<bool>,
+    }
+
+    #[test]
+    fn test_sign_and_verify() {
+        let key_bundle = SignatureKeyBundle::new();
+        let payload = TestPayload {
+            data: "test data".to_string(),
+        };
+
+        let signature = key_bundle.sign(&payload);
+        assert!(key_bundle.verify(&payload, &signature).is_ok());
+    }
+
+    #[test]
+    fn test_verify_with_invalid_signature() {
+        let key_bundle = SignatureKeyBundle::new();
+        let payload = TestPayload {
+            data: "test data".to_string(),
+        };
+
+        let signature = key_bundle.sign(&payload);
+
+        let invalid_payload = TestPayload {
+            data: "tampered data".to_string(),
+        };
+
+        assert!(key_bundle.verify(&invalid_payload, &signature).is_err());
+    }
+
+    #[test]
+    fn test_verify_with_different_bundles() {
+        let key_bundle = SignatureKeyBundle::new();
+        let payload = TestPayload {
+            data: "test data".to_string(),
+        };
+        let signature = key_bundle.sign(&payload);
+
+        let key_bundle = SignatureKeyBundle::new();
+        assert!(key_bundle.verify(&payload, &signature).is_err());
+    }
+
+    #[test]
+    fn test_sign_bytes_and_verify_bytes() {
+        let key_bundle = SignatureKeyBundle::new();
+        let payload = b"test bytes";
+
+        let signature = key_bundle.sign_bytes(payload);
+        assert!(key_bundle.verify_bytes(payload, &signature).is_ok());
+    }
+
+    #[test]
+    fn test_verify_bytes_with_invalid_signature() {
+        let key_bundle = SignatureKeyBundle::new();
+        let payload = b"test bytes";
+
+        let signature = key_bundle.sign_bytes(payload);
+
+        let invalid_payload = b"tampered bytes";
+
+        assert!(
+            key_bundle
+                .verify_bytes(invalid_payload, &signature)
+                .is_err()
+        );
+    }
+}

@@ -5,10 +5,8 @@ use crate::DbPool;
 pub fn root_generation() -> Router<DbPool> {
     Router::new()
         // .route("/generate-root", get(get_root_generation_attempt))
-        // .route("/generate-root", delete(cancel_generate_root))
         .route("/generate-root", post(generate_new_root))
+        // .route("/generate-root", delete(cancel_generate_root))
 }

-async fn generate_new_root() {
-    todo!()
-}
+async fn generate_new_root() {}

@@ -1,7 +1,5 @@
 use axum::{
-    Json, Router,
-    extract::State,
-    routing::{get, post, put},
+    extract::State, routing::{get, post, put}, Json, Router
 };
 use log::warn;
 use serde::Deserialize;
@@ -13,7 +11,7 @@ pub fn sealing_routes() -> Router<DbPool> {
         .route("/seal", post(seal_post))
         .route("/seal-status", get(seal_status_get))
         .route("/unseal", post(unseal_post))
-        // Again? It's supposed to be POST but is actually a PUT
+        // WTF? Again? It's supposed to be POST but is actually a PUT
         .route("/unseal", put(unseal_post))
 }
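(One note on the double registration above: it relies on axum merging method routers when the same path is added twice. The same POST/PUT pairing, which exists because Vault's HTTP API unseals via PUT while some clients send POST, can be written as one chained MethodRouter. A sketch of the equivalent wiring, assuming axum 0.6 or later, with the handler names taken from the hunk above:)

    use axum::{
        routing::{get, post},
        Router,
    };

    use crate::DbPool;

    // Equivalent registration with one entry per path.
    pub fn sealing_routes() -> Router<DbPool> {
        Router::new()
            .route("/seal", post(seal_post))
            .route("/seal-status", get(seal_status_get))
            // Vault's API unseals via PUT; accept POST as well for convenience.
            .route("/unseal", post(unseal_post).put(unseal_post))
    }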
@@ -49,6 +47,4 @@ async fn unseal_post(State(pool): State<DbPool>, Json(req): Json<UnsealRequest>)
     Ok(())
 }

-async fn seal_status_get(State(_pool): State<DbPool>) -> &'static str {
-    todo!("not implemented")
-}
+async fn seal_status_get(State(pool): State<DbPool>) {}