Compare commits

..

16 commits
dev ... docs

Author SHA1 Message Date
someone
699ed7be40
Added testing instructions 2024-06-02 23:04:53 +02:00
someone
01625655e7
further doc work 2024-06-02 23:00:04 +02:00
sam
82d8fa7509
+ sysdesign v0.5
- new abstract
2024-06-02 13:39:49 -07:00
someone
12084372a8
Further doc work 2024-06-02 22:29:10 +02:00
someone
3c1df01014
Edited Compliance and testing 2024-06-02 21:53:11 +02:00
sam
e1cef70830
+ system design mock 2024-06-02 12:54:06 -07:00
someone
8f358517e5
updated Compliance and testing 2024-06-02 21:11:07 +02:00
someone
480d4f7e3d
Changes to be committed:
modified:   documentation.typ
	new file:   rvault_description.txt
2024-06-01 14:15:21 +02:00
2e40d2061a
Docs (architecture): Describe Contextual Routing 2024-05-08 15:05:11 +02:00
b54f6811f3
Restructure: Split chapters into dedicated files 2024-05-08 15:04:38 +02:00
d0f9653b17
Add .env and test.db to .gitignore 2024-05-07 16:21:48 +02:00
adbf30f77f
Add structure to doc 2024-04-11 00:04:27 +02:00
e7e9332397
fix: gitignore, swith to typst 2024-04-10 17:44:53 +02:00
59516c37db
fix: gitignore, create document root 2024-04-10 17:42:52 +02:00
Samuql
8dba25c31f
Create README.md 2024-04-08 15:10:05 +02:00
592f8144fc
Add gitignore 2024-03-18 15:19:25 +01:00
62 changed files with 212 additions and 67580 deletions

View file

@ -1,25 +0,0 @@
# CI workflow: build, test, and lint the Rust workspace on pull requests.
# (Reconstructed indentation — the flattened form is not valid YAML.)
name: Rust

on:
  # push:
  #   branches: [ "dev" ]
  pull_request:
    branches: [ "dev" ]

env:
  # Force colored cargo output in CI logs.
  CARGO_TERM_COLOR: always

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Build
        run: cargo build --verbose
      - name: Run tests
        run: cargo test --verbose
      - name: Run Clippy lints
        run: cargo clippy --all-targets --all-features
      - name: Run format checks
        run: cargo fmt --check

4
.gitignore vendored
View file

@ -3,9 +3,7 @@
.vscode/
.idea/
.env
test.db
*.pdf
target/
go_client/openapi.json
*.db*

View file

@ -1,12 +0,0 @@
{
"db_name": "SQLite",
"query": "\n UPDATE kv2_secret_version\n SET deletion_time = $4\n WHERE engine_path = $1 AND secret_path = $2\n AND version_number = $3\n ",
"describe": {
"columns": [],
"parameters": {
"Right": 4
},
"nullable": []
},
"hash": "047ebbce6fa0073cc810b189e8db3ff5e4eb347f1c1d9e5408220411a9e08b00"
}

View file

@ -1,44 +0,0 @@
{
"db_name": "SQLite",
"query": "SELECT service_token.* FROM service_token, service_token_role_membership\n WHERE service_token.id = service_token_role_membership.token_id AND\n service_token_role_membership.role_name = 'root'\n LIMIT 1",
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "key",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "expiry",
"ordinal": 2,
"type_info": "Integer"
},
{
"name": "parent_id",
"ordinal": 3,
"type_info": "Text"
},
{
"name": "identity_id",
"ordinal": 4,
"type_info": "Text"
}
],
"parameters": {
"Right": 0
},
"nullable": [
false,
false,
true,
true,
true
]
},
"hash": "0aa5c76c9ea1692da29a0f39998946d230f92a8f252294b25afeabe05749f4ca"
}

View file

@ -1,44 +0,0 @@
{
"db_name": "SQLite",
"query": "SELECT * FROM 'service_token' WHERE key = $1 AND (expiry IS NULL OR expiry > $2) LIMIT 1",
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "key",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "expiry",
"ordinal": 2,
"type_info": "Integer"
},
{
"name": "parent_id",
"ordinal": 3,
"type_info": "Text"
},
{
"name": "identity_id",
"ordinal": 4,
"type_info": "Text"
}
],
"parameters": {
"Right": 2
},
"nullable": [
false,
false,
true,
true,
true
]
},
"hash": "2cbe2fbcd5d8fb6d489f9e3cc7e04182f226964ea9d84219abbe6958dcccfefe"
}

View file

@ -1,26 +0,0 @@
{
"db_name": "SQLite",
"query": "SELECT * FROM 'service_token_role_membership' WHERE token_id = $1",
"describe": {
"columns": [
{
"name": "role_name",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "token_id",
"ordinal": 1,
"type_info": "Text"
}
],
"parameters": {
"Right": 1
},
"nullable": [
false,
false
]
},
"hash": "36485bb70f499346cd1be569887ea8b6f438f4f845ef883e80d58875b839500a"
}

View file

@ -1,20 +0,0 @@
{
"db_name": "SQLite",
"query": "\n SELECT version_number AS latest_version FROM kv2_secret_version\n WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n ORDER BY version_number DESC LIMIT 1",
"describe": {
"columns": [
{
"name": "latest_version",
"ordinal": 0,
"type_info": "Integer"
}
],
"parameters": {
"Right": 2
},
"nullable": [
false
]
},
"hash": "414c74a3c017bde424fe44bbc251fea384b0dbedd1541900d147e0814c1f33d8"
}

View file

@ -1,32 +0,0 @@
{
"db_name": "SQLite",
"query": "SELECT encrypted_key, type as protection_type, nonce FROM root_key ORDER BY version LIMIT 1",
"describe": {
"columns": [
{
"name": "encrypted_key",
"ordinal": 0,
"type_info": "Blob"
},
{
"name": "protection_type",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "nonce",
"ordinal": 2,
"type_info": "Blob"
}
],
"parameters": {
"Right": 0
},
"nullable": [
false,
false,
true
]
},
"hash": "5630a591626bd416be0d1ab12fa993055b521e81382897d247ceee1b41f0bf42"
}

View file

@ -1,20 +0,0 @@
{
"db_name": "SQLite",
"query": "\nWITH latest_version AS (\n SELECT MAX(version_number) AS max_version\n FROM kv2_secret_version\n WHERE engine_path = $1 AND secret_path = $2 -- engine_path AND secret_path\n)\nINSERT INTO kv2_secret_version (engine_path, secret_path, nonce, encrypted_data, created_time, version_number)\nVALUES (\n $1, -- engine_path\n $2, -- secret_path\n $3, -- nonce\n $4, -- encrypted_data\n $5, -- created_time\n CASE -- Use provided version if given\n WHEN $6 IS NOT NULL THEN $6 -- version_number (optional)\n ELSE COALESCE((SELECT max_version FROM latest_version) + 1, 1) -- otherwise 1\n END -- version_number logic\n)\nRETURNING version_number;\n",
"describe": {
"columns": [
{
"name": "version_number",
"ordinal": 0,
"type_info": "Integer"
}
],
"parameters": {
"Right": 6
},
"nullable": [
false
]
},
"hash": "8f7bfd1840d14efec44c7b59ab10461ff122ead43076ad841883a9dd189a4f37"
}

View file

@ -1,20 +0,0 @@
{
"db_name": "SQLite",
"query": "SELECT engine_type FROM secret_engines WHERE mount_point = $1",
"describe": {
"columns": [
{
"name": "engine_type",
"ordinal": 0,
"type_info": "Text"
}
],
"parameters": {
"Right": 1
},
"nullable": [
false
]
},
"hash": "9265f0195bbacd15061927c2a6034e3725a25068fd3faa08cc1d02e7c926f1c2"
}

View file

@ -1,12 +0,0 @@
{
"db_name": "SQLite",
"query": "\n INSERT INTO root_key (encrypted_key, type, version, nonce)\n VALUES ($1, $2, 1, $3)\n ",
"describe": {
"columns": [],
"parameters": {
"Right": 3
},
"nullable": []
},
"hash": "aa131c57e0e255bfe07488095bdf25ab39e9dee182d0aecf988c9d3c2d04e66d"
}

View file

@ -1,12 +0,0 @@
{
"db_name": "SQLite",
"query": "\n INSERT INTO kv2_metadata (engine_path, secret_path, cas_required, created_time, max_versions, updated_time)\n VALUES ($1, $2, 0, $3, 100, $3)\n ON CONFLICT(engine_path, secret_path) DO NOTHING;\n ",
"describe": {
"columns": [],
"parameters": {
"Right": 3
},
"nullable": []
},
"hash": "af57fe92ead35790b02f38f34e1614cd1accb2da61f1d9a07eeefb0fc31ec318"
}

View file

@ -1,50 +0,0 @@
{
"db_name": "SQLite",
"query": "SELECT nonce, encrypted_data, created_time, deletion_time, version_number, secret_path\n FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n ORDER BY version_number DESC LIMIT 1",
"describe": {
"columns": [
{
"name": "nonce",
"ordinal": 0,
"type_info": "Blob"
},
{
"name": "encrypted_data",
"ordinal": 1,
"type_info": "Blob"
},
{
"name": "created_time",
"ordinal": 2,
"type_info": "Datetime"
},
{
"name": "deletion_time",
"ordinal": 3,
"type_info": "Datetime"
},
{
"name": "version_number",
"ordinal": 4,
"type_info": "Integer"
},
{
"name": "secret_path",
"ordinal": 5,
"type_info": "Text"
}
],
"parameters": {
"Right": 2
},
"nullable": [
false,
false,
false,
true,
false,
false
]
},
"hash": "b78c62fe22c4e93c54ecbc0c0cdfa31387baf14bea1ac8d27170e8b6cb456114"
}

View file

@ -1,50 +0,0 @@
{
"db_name": "SQLite",
"query": "SELECT nonce, encrypted_data, created_time, deletion_time, version_number, secret_path\n FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n AND version_number = $3",
"describe": {
"columns": [
{
"name": "nonce",
"ordinal": 0,
"type_info": "Blob"
},
{
"name": "encrypted_data",
"ordinal": 1,
"type_info": "Blob"
},
{
"name": "created_time",
"ordinal": 2,
"type_info": "Datetime"
},
{
"name": "deletion_time",
"ordinal": 3,
"type_info": "Datetime"
},
{
"name": "version_number",
"ordinal": 4,
"type_info": "Integer"
},
{
"name": "secret_path",
"ordinal": 5,
"type_info": "Text"
}
],
"parameters": {
"Right": 3
},
"nullable": [
false,
false,
false,
true,
false,
false
]
},
"hash": "fa8c74205ae4d497983d394ee04181c08d20cdb4a93bfce3c06a114133cd6619"
}

View file

@ -1,12 +0,0 @@
{
"db_name": "SQLite",
"query": "\n INSERT INTO service_token (id, key) VALUES ($1, $2);\n INSERT INTO service_token_role_membership (token_id, role_name) VALUES ($3, 'root');\n ",
"describe": {
"columns": [],
"parameters": {
"Right": 3
},
"nullable": []
},
"hash": "fe6bf34448b9f9defc27ce30a128935d991cd06e22861086c3b1377916731e57"
}

5
.vscode/settings.json vendored Normal file
View file

@ -0,0 +1,5 @@
{
"editor.tabSize": 2,
"editor.detectIndentation": false,
"editor.insertSpaces": false
}

2838
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,51 +0,0 @@
# Package manifest for the rvault server binary.
[package]
name = "rvault-server"
version = "0.1.0"
edition = "2024"

[features]
# Shamir secret sharing is the default seal mechanism.
default = ["shamir"]
# default = ["insecure-dev-sealing"]
# Development-only sealing; not safe for production use.
insecure-dev-sealing = []
# Shamir-based unsealing; pulls in the optional vsss-rs and p256 crates.
shamir = ["vsss-rs", "p256"]

[dependencies]
log = "0.4.27"
env_logger = "0.11.7"
# zeroize_derive allows wiping key material on drop via #[derive(Zeroize)].
zeroize = { version = "1.8.1", features = ["zeroize_derive"] }
time = { version = "0.3.41", features = ["serde", "formatting"]}
tokio = { version = "1.44.1", features = ["full"] }
tower = { version = "0.5.2", features = [] }
axum = "0.8.3"
serde = "1.0.219"
serde_json = "1.0.140"
dotenvy = "0.15.7"
base64 = "0.22.1"
# utoipa = { version = "4.2.0", features = ["axum_extras"] }
sqlx = { version = "0.8.3", features = [
    "sqlite",
    # "postgres",
    # "any",
    "macros",
    "runtime-tokio",
    "tls-rustls",
    "time"
] }
aes-gcm-siv = "0.11.1"
# Optional: only compiled when the "shamir" feature is enabled.
vsss-rs = { version = "5.1.0", optional = true, default-features = false, features = ["zeroize", "std"] }
p256 = { version = "0.13.2", optional = true, default-features = false, features = ["std", "ecdsa"] }
rand = "0.8.5"
uuid = { version = "1.16.0", features = ["v4"] }

[lints]
# Inherit the lint set declared in the workspace table below.
workspace = true

[workspace.lints.clippy]
uninlined_format_args = "warn"
correctness = "warn"
suspicious = "warn"
complexity = "warn"
perf = "warn"
style = "warn"

View file

@ -1,32 +0,0 @@
# Multi-stage build: compile rvault-server on Alpine, then ship a minimal runtime image.
ARG alpine_version=3.22

FROM docker.io/library/rust:1-alpine${alpine_version} AS builder
WORKDIR /src
# musl-dev is needed to link against musl libc on Alpine.
RUN apk add --no-cache musl-dev
RUN cargo install sqlx-cli --no-default-features --features sqlite

# Required for compile-time schemata checks of migrations
ENV DATABASE_URL=sqlite:/tmp/rvault.db
RUN touch /tmp/rvault.db

COPY Cargo.toml Cargo.lock .
# Pre-fetch dependencies with a stub main.rs so this layer is cached
# independently of later source changes.
RUN --mount=type=cache,target=/usr/local/cargo/registry \
    mkdir src && echo "fn main() {}" > src/main.rs && \
    cargo fetch --locked --target $(rustc -vV | sed -n 's|host: ||p') && \
    rm src/main.rs

# Required for compile-time checks of query - database-schema compatibility
COPY migrations migrations
RUN cargo sqlx migrate run

COPY src src
RUN --mount=type=cache,target=/usr/local/cargo/registry \
    cargo build --release --locked

FROM docker.io/library/alpine:${alpine_version} AS runner
# FROM scratch AS runner
COPY --from=builder /src/target/release/rvault-server /usr/bin/rvault-server
CMD ["/usr/bin/rvault-server"]

View file

@ -1,26 +0,0 @@
# Build the container image that runs the Go compatibility tests.
build_tests:
	podman build -t rvault-go-tests -f ./go_tests/Containerfile ./go_tests

# Run the Go compatibility tests against a server on the host network.
run_tests: build_tests
	podman run --rm -it --net=host rvault-go-tests

build_server:
	cargo build

# Start the server with trace-level logging for the `server` crate.
start_server: build_server
	RUST_LOG=server=trace cargo run

# watch_server:
# 	RUST_LOG=server=trace cargo watch -x run

# test_server: build_server build_tests
# 	just start_server & sleep 1 && podman run --rm -it --net=host rvault-go-tests

# Run the same checks as CI: formatting, lints, and unit tests.
check:
	cargo fmt --check
	cargo clippy --all-targets --all-features
	cargo test

# Free port 8200 by killing whatever process currently holds it.
kill_junk:
	fuser -k 8200/tcp

View file

@ -1,30 +0,0 @@
# rvault
rvault is an open-source implementation of the API of Vault and OpenBao, written in Rust.
## Running
You can run an offline build with `SQLX_OFFLINE=true cargo run` or `build`, respectively.
An offline build requires an up-to-date SQLx preparation.
An OCI container image can be created using `podman build . -t rvault`.
Furthermore, rvault attempts to read a `.env` file in the current working directory.
For example, its content could be:
```txt
DATABASE_URL=sqlite:test.db
RUST_LOG=debug
```
## Development
SQLx preparation can be updated with `cargo sqlx prepare`.
Hence, it is not useful for development.
With `cargo sqlx database reset` the database will be recreated,
deleting all contents and reapplying migrations.
This is helpful when changing migrations during development.
When running a normal, not-offline, build, the database must be migrated (e.g. using `cargo sqlx database reset`)
for compilation of compile-time-checked queries.

21
assets/Design.svg Normal file

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 88 KiB

View file

@ -0,0 +1,14 @@
= Project Requirements
This chapter will outline the requirements
to be fulfilled by this project in regards to the DHBW.
Eines der Gruppenmitglieder muss bis zum TODO folgende Dateien hier hochladen:
- Ausarbeitung als PDF (use dhbw template later on)
- Präsentation als PDF
- Textdatei mit Teilnehmern, Projekttitel und Kurzbeschreibung
- Programmentwurf (komplettes Repository, was bewertet wird) als ZIP / TAR
Die Prüfungsleistung muss bis spätestens zum TODO hier abgegeben werden. Spätere Abgaben sind nicht möglich.

View file

@ -0,0 +1,5 @@
= Project Goal
The aim of this project is to reimplement a subset of the functionality of the secret management tool "#link("https://github.com/hashicorp/vault")[HashiCorp Vault]", which is currently written in Go, in Rust.
Expected benefits of this are an increase in speed and reliability and a higher level of security.
As part of this, an HTTP API for key-value based secret handling is implemented. Data is stored in an SQLite database.

View file

@ -0,0 +1,2 @@
= Planning and Timeline

View file

@ -0,0 +1,5 @@
= Architecture
#include "./architecture/System-Design.typ"
#include "./architecture/Dynamic-Routing.typ"

View file

@ -0,0 +1,14 @@
= API-Compliance and Testing
One of the goals of this implementation is to provide compatibility with the current version of the Vault written in Go.
Therefore, the #link("https://github.com/hashicorp/vault-client-go")[HashiCorp Go-client] is used for testing.
To ensure compatibility, the #link("https://github.com/hashicorp/vault-client-go/blob/main/openapi.json")[OpenAPI specification] and the #link("https://developer.hashicorp.com/vault")[Vault documentation], both published by HashiCorp, are used as references. They provide information on input parameters, types and return values.
Implemented test cases are:
- Write a secret
- Destroy specific versions of a secret
- Read a secret
- Read a metadata entry
- Write a metadata entry

View file

@ -0,0 +1,67 @@
== Contextual Routing
This issue has previously been addressed as "dynamic routing".
// Problem can be split in two, // TODO
In Axum, routers can be configured using a fluent API #footnote[Term borrowed from C\# and OOP].
#box[```rs
#[tokio::main]
async fn main() {
let app = Router::new()
.route("/", get(root))
// .route("/posts", get(get_posts))
.route("/posts/:post_id", get(get_post_by_id))
.route("/posts/:post_id/discussions/+thread_ids", get(get_posts_threads));
todo!("Actually listen")
}
async fn root() {
todo!()
}
async fn get_post_by_id(Path(post_id): Path<String>) -> impl IntoResponse {
todo!()
}
async fn get_posts_threads(Path(post_id): Path<String>, Path(discussion_thread_ids): Path<String>) -> impl IntoResponse {
let discussion_thread_ids = discussion_thread_ids.split("/");
todo!()
}
```]
As can be seen, a parameter can be extracted from the URL path by using `/:some_name` and can be placed as any infix of the URL: at the beginning, in the middle, or the end.
However, as the path is split by slash characters (`/`), the extracted part must not contain slashes and hence can only be (up to) one part of the path.
Longer parts of the request can be obtained using `+some_name` and can only be placed as a postfix of the URL, but not other types of infixes.
This can be used to obtain the remaining path of the request.
=== Problem Description
+ The API to implement e.g. has the concept of mount points (similar to how filesystems can be mounted on UNIX-like systems).
Mount points can contain multiple slashes.
For example `/v1/some/mount/point/data/some/path/secret` may consist of a mount point `some/mount/point` and further, following routes of the mapped secret engine. //TODO
In this example, `/data` is related to the Key-Value engine and `/some/path/secret` specifies a path within the secret engine instance mounted at the mount point.
This implies a significant problem:
How to determine which part of the URL represents a mount point, when it is not certain what the postfix of the URL will be (this problem follows).
+ Another problem is that based on the mount point, the request must be processed by the applicable secret engine.
Based on context stored in the DBMS, the request must be passed to the secret engine along with the determined mount point.
=== Solution
Secret Engines have their dedicated router.
The main router has an instance of these routers along with the database pool wrapped within a struct in its state.
The router instances have a reference to the database pool, which is internally wrapped by an Atomic Reference Counter (`Arc`).
Upon a request, the remaining path is obtained (via `/+mount_path`).
Then, the path is looked up in the database, also requesting the engine type.
If not found, the last slash character and the following string are removed and the result is looked up again. This is repeated until the path either is found or has a length of zero, in which case the request is rejected as "404 Not Found".
If found, the router is called with the request and mount path is given to the router as an "Extension".
The `call` #link("https://docs.rs/tower/0.4.13/tower/trait.Service.html#tymethod.call")[(link)]
function which Axum routers inherit from the Tower crate, allows to hand the request over to engine's router.

View file

@ -0,0 +1,35 @@
== System Design
=== Clients
The rvault server is compatible with any client acting in compliance with the HashiCorp Vault API specification.
To achieve this, tests are written using the official HashiCorp Vault Go client.
=== Webserver
The Webserver forwards client requests to different routers according to the request paths:
i.e. "/v1/auth" for the authorization router.
Those routers map requests to their corresponding handlers.
Problem:
- TODO describe middleware
=== Engines
Engines are used to manage data. Depending on the engine this can include storage, generation and encryption. Each engine is independent from other engines.
Different engines are addressed via a path, which is passed as a prefix to the secret path. The path for the key-value storage would be .../kv-v2/foo.
In the code, engines are represented by folders which are divided into a logic section, struct sections and a test section.
=== Storage
Data is currently stored in an SQLite database which is accessed with SQLx without an ORM. Later stages of the project will allow for different storage systems to be used.
=== Overview
#figure(
image("../../assets/Design.svg", width: 80%),
caption: [
The acting components of rvault.
],
)

22
documentation.typ Normal file
View file

@ -0,0 +1,22 @@
#import "@preview/arkheion:0.1.0": arkheion, arkheion-appendices
#show: arkheion.with(
title: [Secret Management with rvault],
authors: (
(name: "someone", email: "", affiliation: "HPE"),
(name: "someone", email: "", affiliation: "HPE"),
(name: "(someone)", email: "", affiliation: "HPE"),
),
// Insert your abstract after the colon, wrapped in brackets.
// Example: `abstract: [This is my abstract...]`
abstract: [The goal of this project is to implement the HashiCorp Vault in Rust. During this project we have developed an architecture and started implementation of some cases for key-value-storage. Further development is needed to achieve feature parity with the original version written in Go.],
// keywords: ("First keyword", "Second keyword", "etc."),
date: "2024-06-02",
)
#show link:underline
//#include "./chapters/01-Project-Requirements.typ"
#include "./chapters/02-Project-Goal.typ"
//#include "./chapters/03-Planning-and-Timeline.typ"
#include "./chapters/04-Architecture.typ"
#include "./chapters/05-API-Compliance-and-Testing.typ"

View file

@ -1,16 +0,0 @@
FROM docker.io/library/golang:1.22-alpine3.19 AS builder
WORKDIR /src
COPY go.mod go.sum ./
RUN go mod download
COPY . .
# RUN go build -o /app
RUN go build
CMD go test tests/*
# FROM docker.io/library/alpine:3.19
# COPY --from=builder /app /app
# CMD ["/app"]

View file

@ -1,32 +0,0 @@
module github.com/C0ffeeCode/rvault/go_client
go 1.21.9
// require github.com/hashicorp/vault-client-go v0.4.3
require (
github.com/hashicorp/vault-client-go v0.4.3
github.com/hashicorp/vault/api v1.16.0
)
require (
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
golang.org/x/crypto v0.32.0 // indirect
golang.org/x/net v0.34.0 // indirect
golang.org/x/sys v0.29.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.5.0 // indirect
)

View file

@ -1,81 +0,0 @@
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U=
github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw=
github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/vault-client-go v0.4.3 h1:zG7STGVgn/VK6rnZc0k8PGbfv2x/sJExRKHSUg3ljWc=
github.com/hashicorp/vault-client-go v0.4.3/go.mod h1:4tDw7Uhq5XOxS1fO+oMtotHL7j4sB9cp0T7U6m4FzDY=
github.com/hashicorp/vault/api v1.16.0 h1:nbEYGJiAPGzT9U4oWgaaB0g+Rj8E59QuHKyA5LhwQN4=
github.com/hashicorp/vault/api v1.16.0/go.mod h1:KhuUhzOD8lDSk29AtzNjgAu2kxRA9jL9NAbkFlqvkBA=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View file

@ -1,26 +0,0 @@
package main
import (
"log/slog"
// "github.com/openbao/openbao"
)
// vault cmd args: >vault server -dev -dev-root-token-id="my-token"
// main only points developers at the real test suite; the actual
// compatibility tests live in tests/ and are run with `go test`.
// (The previously commented-out client setup here was superseded by
// TestMain in tests/ and has been removed.)
func main() {
	slog.Info("run tests in tests/ with >go test")
}

View file

@ -1,123 +0,0 @@
package tests
import (
"context"
"log"
"os"
"reflect"
"testing"
"time"
// "github.com/hashicorp/vault-client-go"
// "github.com/hashicorp/vault-client-go/schema"
vault "github.com/hashicorp/vault/api"
// vault "github.com/openbao/openbao/api/v2"
)
var Client *vault.Client
var ctx context.Context
// TestMain configures the package-level Vault API client to target a
// locally running rvault server, then runs the whole test suite.
func TestMain(m *testing.M) {
	ctx = context.Background()
	var err error

	config := vault.DefaultConfig()
	// The rvault server under test is expected on the default Vault port.
	config.Address = "http://localhost:8200"
	config.Timeout = 30 * time.Second

	// prepare a client with the given base address
	Client, err = vault.NewClient(config)
	if err != nil {
		log.Fatalf("unable to initialize Vault client: %v", err)
	}
	log.Println("client prepared")

	// authenticate with a root token (insecure)
	Client.SetToken("my-token")

	exitCode := m.Run() // run all tests and get code
	os.Exit(exitCode)
}
// Requires in-code portions
// func TestUnseal(t *testing.T) {
// abc := []string{
// "eyJpIjpbMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwxXSwidiI6WzE4OCw2NiwxMTksMTQ0LDE1OSw3MCw4NiwxMTUsMTIwLDI1MywxMjQsOTYsMTM5LDk0LDQ1LDE2NiwyMTMsMzYsMTE1LDU4LDg5LDE0OCw2MCwyOCwxNTAsMTE2LDU3LDg5LDIwMCw5NywxNDYsMjEzXX0=",
// "eyJpIjpbMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwyXSwidiI6WzE1OCwyNDQsNzEsOTUsMTIyLDEzOCwyNDEsMjEzLDQ1LDE1NiwxMTgsNCwxNzYsNiwxNTcsMTkyLDE2MSwxNjEsNDMsMTc1LDE5NSw4NywxODAsMTAwLDE1NiwxNCwxNDgsMTUsMTc4LDkwLDY3LDExOF19",
// }
// for i := range abc {
// if _, err := Client.Sys().Unseal(abc[i]); err != nil {
// t.Fatal("Error unsealing", err)
// }
// }
// }
// kv2Write writes a fixed secret payload to the given KV2 mount and path,
// then verifies the round trip by reading the secret back and comparing it
// to what was written. Fails the test on any mismatch or request error.
func kv2Write(t *testing.T, mount string, path string) {
	data := map[string]any{
		"password1": "123abc",
		"password2": "horse horse horse battery staple correct",
	}
	t.Logf("Attempting to write to KV2 %s path %s:\t", mount, path)
	v, err := Client.KVv2(mount).Put(ctx, path, data)
	if err != nil {
		t.Fatal("ERROR writing secret:\n\t", err)
	}
	t.Log("Success (unchecked)\n\t", v)
	res, err := Client.KVv2(mount).Get(ctx, path)
	if err != nil {
		t.Fatal("ERROR checking/reading secret (request failed)\n\t", err)
	}
	if !reflect.DeepEqual(res.Data, data) {
		// Report both payloads. (Previously a stray t.Fatal("AAAAH", ...)
		// aborted first and made this informative message unreachable.)
		t.Fatalf("ERROR secret received does not match what was ought to be written.\n\tWritten: %s\n\tReceived: %s\n", data, res.Data)
	}
	t.Logf("SUCCESS writing to KV2 %s path %s\n", mount, path)
}
// kv2Delete soft-deletes the latest version of the secret at mount/path and
// verifies that a subsequent read no longer returns it.
func kv2Delete(t *testing.T, mount string, path string) {
	err := Client.KVv2(mount).Delete(ctx, path) // currently disregarding modifier options
	if err != nil {
		// Use t.Fatal (not log.Fatal): log.Fatal calls os.Exit, which skips
		// test cleanup and proper failure reporting. This also matches the
		// error handling style used in kv2Write.
		t.Fatal("ERROR deleting secret:\n\t", err)
	}
	res, err := Client.KVv2(mount).Get(ctx, path)
	if res != nil || err == nil {
		t.Fatal("ERROR checking/reading secret (request failed)\n\t", res, err)
	}
	t.Logf("SUCCESS deleting KV2 secret %s path %s\n", mount, path)
}
// https://developer.hashicorp.com/vault/api-docs/secret/kv/kv-v2#create-update-secret
// @Philip: the path is specified in the KvV2Write method
// TestWriteSecret exercises writes against two independently mounted KV2
// engines (to verify engine isolation), then deletes one secret.
// See: https://developer.hashicorp.com/vault/api-docs/secret/kv/kv-v2#create-update-secret
func TestWriteSecret(t *testing.T) {
	// Apparently used as a default if mountpath is an empty string (client library)
	var mountpath = "/kv-v2"
	var mountpath2 = "/some"
	// Path foo
	t.Logf("Writing to first KV2 engine at %s...", mountpath)
	kv2Write(t, mountpath, "foo")
	kv2Write(t, mountpath, "bar")
	t.Logf("Writing to second KV2 engine at %s...", mountpath2)
	kv2Write(t, mountpath2, "foo")
	kv2Write(t, mountpath2, "bar")
	t.Logf("Deleting...")
	kv2Delete(t, mountpath, "foo")
}
// func TestDeleteSecret(t *testing.T) {
// _, err := client.Secrets.KvV2Delete(ctx, "foo") // currently disregarding modifier options
// if err != nil {
// log.Fatal("kv2: Failed to delete secret:\n\t", err)
// }
// }
// func TestReadSecret(t *testing.T) {
// _, err := client.Secrets.KvV2Read(ctx, "bar")
// if err != nil {
// log.Fatal("kv2: Failed to read secret:\n\t", err)
// }
// }

View file

@ -1,8 +0,0 @@
-- Add migration script here
-- Registry of mounted secret engines: maps each mount point to its engine type.
-- The request dispatcher looks up the longest matching mount point here.
CREATE TABLE secret_engines (
    mount_point TEXT PRIMARY KEY NOT NULL,
    engine_type TEXT NOT NULL
);
-- Seed two KV v2 engines used by the Go integration tests.
INSERT INTO secret_engines (mount_point, engine_type) VALUES ('/kv-v2', 'kv_v2'), ('/some', 'kv_v2');

View file

@ -1,32 +0,0 @@
-- Add migration script here
-- Per-secret metadata for the KV v2 engine, keyed by engine mount and secret path.
CREATE TABLE kv2_metadata (
    engine_path TEXT NOT NULL,
    secret_path TEXT NOT NULL,
    cas_required INTEGER NOT NULL, -- no bool datatype in sqlite
    created_time TIMESTAMP NOT NULL,
    delete_version_after TEXT, -- Maybe NOT NULL
    max_versions INTEGER NOT NULL,
    -- current_version INTEGER NOT NULL,
    -- oldest_version INTEGER NOT NULL,
    updated_time TIMESTAMP NOT NULL,
    custom_data TEXT,
    PRIMARY KEY (engine_path, secret_path)
);
-- One row per stored secret version. The payload is stored encrypted;
-- the CHECK enforces a 12-byte nonce (consistent with an AES-GCM-style
-- cipher -- TODO confirm against the sealing implementation).
CREATE TABLE kv2_secret_version (
    engine_path TEXT NOT NULL,
    secret_path TEXT NOT NULL,
    version_number INTEGER NOT NULL CHECK ( version_number > 0 ),
    created_time DATETIME NOT NULL,
    deletion_time DATETIME, -- NULL while the version is live; set on soft delete
    encrypted_data BLOB NOT NULL,
    nonce BLOB NOT NULL CHECK ( length(nonce) = 12 ),
    PRIMARY KEY (engine_path, secret_path, version_number),
    FOREIGN KEY (engine_path, secret_path) REFERENCES kv2_metadata(engine_path, secret_path)
);

View file

@ -1,8 +0,0 @@
-- Sealing Key
-- Single-row table (version is forced to 1) holding the encrypted root key
-- used to seal/unseal stored secrets.
CREATE TABLE root_key (
    version INTEGER PRIMARY KEY CHECK ( version = 1 ),
    encrypted_key BLOB NOT NULL,
    nonce BLOB, -- nullable; presumably unused for some key types -- TODO confirm
    type TEXT NOT NULL CHECK ( type IN ('dev_only', 'simple', 'shamir') )
);

View file

@ -1,25 +0,0 @@
-- Identities that service tokens can optionally be bound to.
CREATE TABLE identity (
    id TEXT PRIMARY KEY NOT NULL,
    name TEXT NOT NULL
);
-- Many-to-many link between role names and tokens. Roles are plain strings
-- here (e.g. 'root'); there is no separate roles table yet.
CREATE TABLE service_token_role_membership (
    role_name TEXT NOT NULL,
    token_id TEXT NOT NULL
        REFERENCES service_token(id)
        ON DELETE CASCADE
        ON UPDATE CASCADE,
    PRIMARY KEY (role_name, token_id)
);
-- Authentication tokens. `key` is the secret presented by clients;
-- `expiry` is seconds since unix epoch, NULL meaning the token never expires.
CREATE TABLE service_token (
    id TEXT PRIMARY KEY NOT NULL,
    key TEXT NOT NULL,
    expiry INTEGER,
    parent_id TEXT NULL REFERENCES service_token(id)
        ON DELETE NO ACTION
        ON UPDATE CASCADE,
    identity_id TEXT NULL REFERENCES identity(id)
        ON DELETE CASCADE
        ON UPDATE CASCADE
);

File diff suppressed because it is too large Load diff

50539
openapi.json

File diff suppressed because it is too large Load diff

21
rvault_description.txt Normal file
View file

@ -0,0 +1,21 @@
Secret Management with rvault
Groupmembers:
[redacted]
Description:
The goal is to rewrite a part of the HashiCorp Vault in Rust.
It should be compatible with the current version of Vault and its clients.
Therefore, a Vault client written in Go is used to perform tests.
During this project only a small part will be implemented.
It will be further developed as an open-source project.
Implemented features include:
- Design of the architecture
- Implementation of dynamic routing to allow for exchangeable secret engines
- Basic kv-store
The main development branch is the engine-kv branch.
To execute the tests navigate to rvault/go_client/tests and execute "go test -v ./..."
To clear the test cache use "go clean -testcache"

View file

@ -1,14 +0,0 @@
pub mod auth_extractor;
pub(crate) mod token;
use crate::auth::token::*;
use crate::storage::DbPool;
use axum::Router;
/// Authentication routes.
///
/// Builds the top-level `/auth` router; currently only token-based
/// authentication is mounted, under the `/token` sub-path.
pub fn auth_router(pool: DbPool) -> Router<DbPool> {
    // The token auth router handles all token-related authentication routes
    Router::new()
        .nest("/token", token_auth_router(pool.clone()))
        .with_state(pool)
}

View file

@ -1,66 +0,0 @@
use crate::auth::token::{TokenDTO, get_roles_from_token, get_token_from_key};
use crate::storage::DbPool;
use axum::body::Body;
use axum::extract::FromRequestParts;
use axum::http::request::Parts;
use axum::http::{HeaderMap, Request, StatusCode, header};
use std::fmt::Debug;
// Currently unused but for usage in the future
#[allow(unused)]
/// AuthInfo is an extractor that retrieves authentication information
/// (the validated token and its role names) from the request.
#[derive(Debug)]
pub struct AuthInfo {
    /// The token row matched from the Authorization header.
    token: TokenDTO,
    /// Role names associated with the token (empty if the lookup fails).
    roles: Vec<String>,
}
impl FromRequestParts<DbPool> for AuthInfo {
    type Rejection = StatusCode;
    /// Extracts authentication information from the request parts.
    /// Rejects with `401 Unauthorized` when the Authorization header is
    /// missing or does not resolve to a live (non-expired) token.
    async fn from_request_parts(
        parts: &mut Parts,
        state: &DbPool,
    ) -> Result<Self, Self::Rejection> {
        let header = &parts.headers;
        inspect_with_header(state, header).await
    }
}
// Currently unused but for usage in the future
#[allow(unused)]
/// Extracts the headers from the request and returns the result of the
/// `inspect_with_header` function. Convenience wrapper for callers that
/// hold a full `Request` rather than request parts.
pub async fn inspect_req(state: &DbPool, req: &Request<Body>) -> Result<AuthInfo, StatusCode> {
    let header = req.headers();
    inspect_with_header(state, header).await
}
/// Inspects the request headers and extracts authentication information.
/// Returns an `AuthInfo` struct containing the token and roles if successful.
/// If the authorization header is missing, non-UTF-8, or does not resolve to
/// a live token, it returns `StatusCode::UNAUTHORIZED`.
///
/// This function is intentionally separated so it can be used from
/// within the Axum extractor as well as in other functions.
pub async fn inspect_with_header(
    state: &DbPool,
    header: &HeaderMap,
) -> Result<AuthInfo, StatusCode> {
    // Missing or non-string header -> 401.
    let auth_value = header
        .get(header::AUTHORIZATION)
        .and_then(|value| value.to_str().ok())
        .ok_or(StatusCode::UNAUTHORIZED)?;
    // Unknown/expired token -> 401. `?` replaces the previous
    // is_err()/unwrap() pair.
    let token = get_token_from_key(auth_value, state)
        .await
        .map_err(|_| StatusCode::UNAUTHORIZED)?;
    let roles = get_roles_from_token(&token, state).await;
    Ok(AuthInfo { token, roles })
}

View file

@ -1,286 +0,0 @@
// There are some placeholder functions, that will have to be implemented before the first release.
// They are marked with `todo!()` to indicate that they need to be implemented.
// We want to keep these functions in the codebase.
// That is why we choose to suppress unused warnings for now.
// TODO
#![allow(unused)]
use crate::storage::DbPool;
use axum::extract::State;
use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};
use axum::routing::post;
use axum::{Json, Router};
use log::error;
use rand::{Rng, distributions::Alphanumeric};
use serde::{Deserialize, Serialize};
use sqlx::Error;
use uuid::Uuid;
/// Identity row as exposed over the API.
#[derive(Debug, Serialize)]
pub struct IdentityDTO {
    id: String,
    name: String,
}

/// Service token row mirroring the `service_token` table.
#[derive(Debug)]
pub struct TokenDTO {
    /// Secret key presented by clients; its first character encodes the
    /// token type ('s' = service, 'b' = batch, 'r' = recovery).
    key: String,
    id: String,
    identity_id: Option<String>,
    parent_id: Option<String>,
    /// Expiry as seconds since unix epoch; `None` means the token never expires.
    expiry: Option<i64>,
}

/// Row linking a token to a role name (`service_token_role_membership`).
#[derive(Debug)]
pub struct TokenRoleMembershipDTO {
    role_name: String,
    token_id: String,
}

/// Represents a request body for the `/auth/token/lookup` endpoint.
#[derive(Deserialize)]
struct RequestBodyPostLookup {
    token: String,
}

/// Represents the response body for the `/auth/token/lookup` endpoint.
#[derive(Serialize)]
struct TokenLookupResponse {
    id: String,
    type_name: String,
    roles: Vec<String>,
}

/// Represents an error response for the API.
#[derive(Serialize)]
struct ErrorResponse {
    error: String,
}
/// Generates a random string of the specified length using alphanumeric characters.
// TODO: Make string generation secure
fn get_random_string(len: usize) -> String {
    let mut rng = rand::thread_rng();
    (0..len)
        .map(|_| char::from(rng.sample(Alphanumeric)))
        .collect()
}
/// Creates a root token if none exists in the database.
/// Returns true if a new root token was created, false if one already exists.
///
/// Panics (after logging) if token creation fails — the server cannot run
/// without a root token.
pub async fn create_root_token_if_none_exist(pool: &DbPool) -> bool {
    // Check if a root token already exists
    let exists = sqlx::query!(
        r#"SELECT service_token.* FROM service_token, service_token_role_membership
        WHERE service_token.id = service_token_role_membership.token_id AND
        service_token_role_membership.role_name = 'root'
        LIMIT 1"#
    )
    .fetch_one(pool)
    .await
    .is_ok();
    if exists {
        return false;
    }
    // If no root token exists, create one. `match` replaces the previous
    // is_err()/err().unwrap()/result.unwrap() sequence.
    match create_root_token(pool).await {
        Ok(key) => {
            // If successful, print the root token. This will only happen once.
            println!("\n\nYour root token is: {key}");
            println!("It will only be displayed once!\n\n");
            true
        }
        Err(error) => {
            // Log the error and panic
            error!("create_root_token failed: {error:?}");
            panic!("create_root_token failed: {error:?}");
        }
    }
}
/// Creates a root token in the database and returns its key on success.
/// The key is prefixed "s." to mark it as a service token.
async fn create_root_token(pool: &DbPool) -> Result<String, Error> {
    let id = Uuid::new_v4().to_string();
    let key = "s.".to_string() + &get_random_string(24);
    // Insert the root token and its 'root' role membership.
    // NOTE(review): this relies on the driver executing both statements of
    // the multi-statement query — confirm sqlx/SQLite behavior here.
    sqlx::query!(r#"
        INSERT INTO service_token (id, key) VALUES ($1, $2);
        INSERT INTO service_token_role_membership (token_id, role_name) VALUES ($3, 'root');
        "#, id, key, id)
        .execute(pool)
        .await
        // `map` collapses the previous is_ok()/unwrap_err() branching.
        .map(|_| key)
}
/// Gets the current time in seconds since unix epoch.
fn get_time_as_int() -> i64 {
    let since_epoch = std::time::UNIX_EPOCH
        .elapsed()
        .expect("system clock is set before the unix epoch");
    since_epoch.as_secs() as i64
}
/// Gets the type of token. (The first character of the key always specifies the type:
/// 's' = service, 'b' = batch, 'r' = recovery.)
/// Returns an error for any other leading character (including an empty key).
fn get_token_type(token: &TokenDTO) -> Result<String, &str> {
    // `chars()` borrows — no need to clone the whole key string.
    let type_name = match token.key.chars().next().unwrap_or('?') {
        's' => "service",
        'b' => "batch",
        'r' => "recovery",
        _ => {
            error!("Unsupported token type");
            return Err("Unsupported token type");
        }
    };
    Ok(type_name.to_string())
}
/// Retrieves a token from the database using its key.
/// If the token is found and not expired, it returns the token.
/// Else, it returns an error (`sqlx::Error::RowNotFound` when no live token
/// matches the key).
pub async fn get_token_from_key(token_key: &str, pool: &DbPool) -> Result<TokenDTO, Error> {
    // NULL expiry means the token never expires; otherwise it must lie in the future.
    let time = get_time_as_int();
    sqlx::query_as!(
        TokenDTO,
        r#"SELECT * FROM 'service_token' WHERE key = $1 AND (expiry IS NULL OR expiry > $2) LIMIT 1"#,
        token_key, time).fetch_one(pool).await
}
/// Retrieves the roles associated with a given token from the database.
/// If the lookup fails (including when the token has no memberships),
/// it returns an empty vector.
pub async fn get_roles_from_token(token: &TokenDTO, pool: &DbPool) -> Vec<String> {
    let result = sqlx::query_as!(
        TokenRoleMembershipDTO,
        r#"SELECT * FROM 'service_token_role_membership' WHERE token_id = $1"#,
        token.id
    )
    .fetch_all(pool)
    .await;
    // Consume the rows and move the role names out instead of cloning each
    // one; `unwrap_or_default` avoids eagerly constructing a Vec.
    result
        .unwrap_or_default()
        .into_iter()
        .map(|r| r.role_name)
        .collect()
}
/// Return a router, that may be used to route traffic to the corresponding handlers.
/// Currently only `POST /lookup` is wired up; the remaining token endpoints
/// exist below only as `todo!()` stubs.
pub fn token_auth_router(pool: DbPool) -> Router<DbPool> {
    Router::new()
        .route("/lookup", post(post_lookup))
        .with_state(pool)
}
/// Handles the `/auth/token/lookup` endpoint.
/// Retrieves the token and its associated roles from the database using the provided token key.
/// The output format does not yet match the openBao specification and is for testing only!
///
/// Responds 404 both for unknown and for expired tokens, because
/// `get_token_from_key` filters expired rows in SQL.
async fn post_lookup(
    State(pool): State<DbPool>,
    Json(body): Json<RequestBodyPostLookup>,
) -> Response {
    let token_str = body.token;
    // Validate the token string
    match get_token_from_key(&token_str, &pool).await {
        // If the token is found, retrieve its type and roles
        Ok(token) => {
            let type_name = get_token_type(&token).unwrap_or_else(|_| String::from("Unknown"));
            let roles = get_roles_from_token(&token, &pool).await;
            let resp = TokenLookupResponse {
                id: token.id,
                type_name,
                roles,
            };
            // Return the token information as a JSON response
            (StatusCode::OK, axum::Json(resp)).into_response()
        }
        // If the token is not found, return a 404 Not Found error
        Err(e) => {
            error!("Failed to retrieve token: {e:?}");
            let err = ErrorResponse {
                error: "Failed to retrieve token".to_string(),
            };
            (StatusCode::NOT_FOUND, axum::Json(err)).into_response()
        }
    }
}
//
// The following functions are placeholders for the various token-related operations.
// They mirror the token API surface (accessors, create, lookup, renew, revoke,
// roles, tidy); each one panics via `todo!()` if reached.
//
async fn get_accessors() -> &'static str {
    todo!("not implemented")
}
async fn post_create() -> &'static str {
    todo!("not implemented")
}
async fn post_create_orphan() -> &'static str {
    todo!("not implemented")
}
async fn post_create_role() -> &'static str {
    todo!("not implemented")
}
async fn get_lookup() -> &'static str {
    todo!("not implemented")
}
async fn get_lookup_self() -> &'static str {
    todo!("not implemented")
}
async fn post_lookup_self() -> &'static str {
    todo!("not implemented")
}
async fn post_renew() -> &'static str {
    todo!("not implemented")
}
async fn post_renew_accessor() -> &'static str {
    todo!("not implemented")
}
async fn post_renew_self() -> &'static str {
    todo!("not implemented")
}
async fn post_revoke() -> &'static str {
    todo!("not implemented")
}
async fn post_revoke_accessor() -> &'static str {
    todo!("not implemented")
}
async fn post_revoke_orphan() -> &'static str {
    todo!("not implemented")
}
async fn post_revoke_self() -> &'static str {
    todo!("not implemented")
}
async fn get_roles() -> &'static str {
    todo!("not implemented")
}
async fn get_role_by_name() -> &'static str {
    todo!("not implemented")
}
async fn post_role_by_name() -> &'static str {
    todo!("not implemented")
}
async fn delete_role_by_name() -> &'static str {
    todo!("not implemented")
}
async fn post_tidy() -> &'static str {
    todo!("not implemented")
}

View file

@ -1,23 +0,0 @@
use axum::{
Json,
body::Body,
http::StatusCode,
response::{IntoResponse, Response},
};
use serde::Serialize;
#[derive(Debug, Serialize)]
/// Error body defined by the HTTP API: serializes as `{"errors": [...]}`.
pub struct HttpError {
    pub errors: Vec<String>,
}

impl HttpError {
    /// Builds a response carrying several error messages at once.
    pub fn multiple_errors(status_code: StatusCode, errors: Vec<String>) -> Response<Body> {
        (status_code, Json(HttpError { errors })).into_response()
    }

    /// Convenience wrapper producing a response with a single error message.
    pub fn simple(status_code: StatusCode, error: impl ToString) -> Response<Body> {
        // Plain one-element vec literal instead of the repeat form `vec![x; 1]`.
        HttpError::multiple_errors(status_code, vec![error.to_string()])
    }
}

View file

@ -1,115 +0,0 @@
pub mod kv;
use axum::{
Extension, Router,
body::Body,
extract::{Request, State},
http::{StatusCode, Uri},
response::{IntoResponse, Response},
};
use log::*;
use tower::Service;
use crate::{common::HttpError, storage::DbPool};
#[derive(Clone)]
/// State to be used to store the database pool
/// and the routers for each engine.
struct EngineMapperState {
    pool: DbPool,
    // Pre-built router shared by every engine of type `kv_v2`; the concrete
    // mount path is supplied per-request via an `EnginePath` extension.
    kv_v2: Router,
}

#[derive(Clone)]
/// Newtype carrying the matched engine mount path into engine handlers.
struct EnginePath(String);
/// Secret engine router.
/// Dynamically puts requests into routers depending on database content
/// (the `secret_engines` table maps mount points to engine types).
pub fn secrets_router(pool: DbPool) -> Router<DbPool> {
    // State containing the pool and engine routers
    let state = EngineMapperState {
        pool: pool.clone(),
        kv_v2: kv::kv_router(pool.clone()),
    };
    // Problem solved via fallback route: mount points are not known at
    // startup, so every path falls through to the dynamic dispatcher.
    Router::new().fallback(engine_handler).with_state(state)
}
/// Map the request to the appropriate engine and call the router.
/// Returns 404 when no registered mount point prefixes the request path, and
/// 500 when the mount point exists but its engine type is not implemented.
async fn engine_handler(
    // State(pool): State<DatabaseDriver>,
    State(engines): State<EngineMapperState>,
    req: Request,
) -> Response<Body> {
    if let Some((mount_path, engine_type)) = map_mount_points(req.uri(), &engines.pool).await {
        info!("Found mount point {mount_path} of type {engine_type}");
        // Match the engine type to the appropriate router
        match engine_type.as_str() {
            "kv_v2" => call_router(engines.kv_v2, mount_path, req).await,
            // Mount point exists but the type is unknown
            _ => unknown_engine(engine_type).into_response(),
        }
    } else {
        // Otherwise, the mount path could not be found
        HttpError::simple(StatusCode::NOT_FOUND, "Secret engine mount path not found")
    }
}
/// Helper function to call the appropriate engine router with the request,
/// after stripping the engine's mount path prefix from the request URI.
async fn call_router(engine: Router, mount_path: String, mut req: Request) -> Response {
    // Strip only the LEADING mount path. The previous `str::replace` removed
    // every occurrence, corrupting secret paths that happen to contain the
    // mount string again (e.g. mount "/kv" with path "/kv/data/kv/foo").
    let path = req.uri().path();
    let stripped = path.strip_prefix(&mount_path).unwrap_or(path).to_owned();
    *req.uri_mut() = stripped.parse().unwrap();
    // Expose the mount path to the engine's handlers via an extension.
    let mount_path = EnginePath(mount_path);
    engine
        .layer(Extension(mount_path))
        .call(req)
        .await
        .into_response()
}
/// HTTP error response for unknown engine types
/// Occurs when the mount path is found in the database
/// but the registered is unknown
fn unknown_engine(engine_type: String) -> impl IntoResponse {
error!("Engine type {engine_type} not implemented");
HttpError::simple(
StatusCode::INTERNAL_SERVER_ERROR,
format!("Engine type {engine_type} not implemented"),
)
}
/// Returns the mount path and engine type for the request,
/// if the mount path is registered in the database.
///
/// Performs longest-prefix matching: starts with the full request path and
/// repeatedly drops the last path segment until a registered mount point is
/// found. Costs up to one DB query per path segment.
async fn map_mount_points(req: &Uri, pool: &DbPool) -> Option<(String, String)> {
    let mut mount_path_fragments: Vec<&str> = req.path().split('/').collect();
    // Find longest matching existing mount path for the request
    for _ in 1..mount_path_fragments.len() {
        let path_str = mount_path_fragments.join("/");
        let record = sqlx::query!(
            "SELECT engine_type FROM secret_engines WHERE mount_point = $1",
            path_str
        )
        .fetch_optional(pool)
        .await;
        // Path found
        if let Ok(Some(row)) = record {
            trace!(
                "Mount path {} found with {:?} engine for route request: {}",
                mount_path_fragments.join("/"),
                row.engine_type,
                req.path()
            );
            return Some((mount_path_fragments.join("/"), row.engine_type));
        } else {
            // Shorten the mount path to find a shorter match
            mount_path_fragments.pop();
        }
    }
    // If no mount path is found, return None
    None
}

View file

@ -1,46 +0,0 @@
mod data;
mod meta;
mod structs;
// #[cfg(test)]
// mod tests;
use crate::storage::DbPool;
use axum::{Router, routing::*};
/// Builds the axum router for a single KV v2 engine.
/// Paths are relative to the engine mount (the mount prefix is stripped by
/// the engine dispatcher before this router is called).
pub fn kv_router(pool: DbPool) -> Router {
    Router::new()
        .route("/config", get(get_config))
        .route("/config", post(post_config))
        .route("/data/{*path}", get(data::get_data))
        // .route("/:mount_path/data/*path/", get(get_data))
        .route("/data/{*path}", post(data::post_data))
        // Why does HC V SDK expect PUT instead of POST - neither in the docs nor spec
        .route("/data/{*path}", put(data::post_data))
        .route("/data/{*path}", delete(data::delete_data))
        .route("/delete/{*path}", post(meta::delete_path))
        .route("/destroy/{*path}", post(meta::destroy_path))
        .route("/metadata/{*path}", get(meta::get_meta))
        // .route("/:mount_path/metadata/*path/", get(get_meta))
        .route("/metadata/{*path}", post(meta::post_meta))
        .route("/metadata/{*path}", delete(meta::delete_meta))
        .route("/subkeys/{*path}", get(get_subkeys))
        .route("/undelete/{*path}", post(post_undelete))
        .with_state(pool)
}
/// GET `/config` — read engine configuration. Stub; panics if reached.
async fn get_config() -> &'static str {
    todo!("not implemented")
}
/// POST `/config` — update engine configuration. Stub; panics if reached.
async fn post_config() -> &'static str {
    todo!("not implemented")
}
/// GET `/subkeys/{path}` — read secret structure without values. Stub.
async fn get_subkeys() -> &'static str {
    todo!("not implemented")
}
/// POST `/undelete/{path}` — restore soft-deleted versions. Stub.
async fn post_undelete() -> &'static str {
    todo!("not implemented")
}

View file

@ -1,253 +0,0 @@
// There are some placeholder functions, that will have to be implemented before the first release.
// They are marked with `todo!()` to indicate that they need to be implemented.
// We want to keep these functions in the codebase.
// That is why we choose to suppress unused warnings for now.
// TODO
#![allow(unused)]
use super::structs::KvV2WriteRequest;
use crate::{
DbPool,
common::HttpError,
engines::{
EnginePath,
kv::structs::{KvSecretData, KvSecretRes, KvV2WriteResponse, Wrapper},
},
storage::sealing::Secret,
};
use axum::{
Extension, Json,
extract::{Path, Query, State},
http::StatusCode,
response::{IntoResponse, NoContent, Response},
};
use log::{debug, error, info, warn};
use serde::Deserialize;
use time::{OffsetDateTime, UtcDateTime};
/// Query parameters accepted by the KV v2 data GET endpoint.
#[derive(Deserialize)]
pub struct GetDataQuery {
    #[serde(default)]
    /// Version of secret requested to be read.
    /// Default `0`, to get the most recent version.
    pub version: u32,
}

/// Row shape returned by the secret-version queries, with the payload still
/// encrypted. Unluckily needed as `sqlx::query_as!()` does not support
/// FromRow derivations.
struct SecretDataInternal {
    pub created_time: OffsetDateTime,
    pub deletion_time: Option<OffsetDateTime>,
    pub version_number: i64,
    pub secret_path: String,
    // 12-byte nonce stored next to the ciphertext (enforced by the schema).
    pub nonce: Vec<u8>,
    pub encrypted_data: Vec<u8>,
}
impl SecretDataInternal {
    /// Decrypts the stored payload with the sealing key and converts this
    /// row into the API-facing `KvSecretData`.
    ///
    /// NOTE(review): `secret.unwrap()` panics if decryption fails — consider
    /// propagating the error once a sealing-error type exists.
    pub async fn into_external(self) -> KvSecretData {
        let secret = Secret::new(self.encrypted_data, self.nonce).decrypt().await;
        KvSecretData {
            created_time: self.created_time,
            deletion_time: self.deletion_time,
            version_number: self.version_number,
            secret_path: self.secret_path,
            secret_data: secret.unwrap(),
        }
    }
}
/// GET handler for `/data/{path}` on a KV v2 engine.
///
/// Returns the requested version (or, when `version == 0`, the latest
/// non-deleted version) of the secret, decrypted and wrapped in the KV v2
/// response envelope. Responds 404 when the secret or version does not exist
/// or is soft-deleted.
pub async fn get_data(
    State(pool): State<DbPool>,
    Query(params): Query<GetDataQuery>,
    Path(path): Path<String>,
    Extension(EnginePath(engine_path)): Extension<EnginePath>,
) -> Result<Response, ()> {
    debug!("Get request: Engine: {engine_path}, path: {path}",);
    let res = if params.version != 0 {
        // With specific version
        sqlx::query_as!(
            SecretDataInternal,
            r#"SELECT nonce, encrypted_data, created_time, deletion_time, version_number, secret_path
            FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL
            AND version_number = $3"#,
            engine_path, path, params.version).fetch_one(&pool).await
    } else {
        // Without specific version: newest live version wins
        sqlx::query_as!(
            SecretDataInternal,
            r#"SELECT nonce, encrypted_data, created_time, deletion_time, version_number, secret_path
            FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL
            ORDER BY version_number DESC LIMIT 1"#,
            engine_path, path).fetch_one(&pool).await
    };
    match res {
        Ok(secret_content) => {
            // Decrypt, then re-parse the stored JSON so it nests under `data`.
            let secret_content = secret_content.into_external().await;
            let inner = secret_content.secret_data;
            let data = Wrapper {
                data: serde_json::from_str(&inner).unwrap(),
            };
            let return_secret = KvSecretRes {
                data,
                options: None,
                version: Some(secret_content.version_number),
            };
            let return_secret = Json(return_secret);
            info!("{return_secret:?}");
            Ok(return_secret.into_response())
        }
        Err(e) => match e {
            sqlx::Error::RowNotFound => {
                warn!("Secret not found (could be correct behavior) {e:?}");
                Ok(HttpError::simple(
                    StatusCode::NOT_FOUND,
                    "Secret not found within kv2 engine",
                ))
            }
            // NOTE(review): any other DB error currently panics the handler.
            _ => panic!("Unhandled error: {e:?}"),
        },
    }
}
/// POST/PUT handler for `/data/{path}` on a KV v2 engine.
///
/// Encrypts the request payload, upserts the secret's metadata row on first
/// write, and inserts a new secret version — all inside one transaction.
/// Returns the created version's metadata.
pub async fn post_data(
    State(pool): State<DbPool>,
    Path(kv_path): Path<String>,
    Extension(EnginePath(engine_path)): Extension<EnginePath>,
    Json(secret): Json<KvV2WriteRequest>,
) -> Result<Response, ()> {
    debug!(
        "Engine: {}, Secret: {}, Version: {:?}, path: {}",
        engine_path,
        kv_path,
        secret.version, //.unwrap_or(0),
        kv_path
    );
    let created_time = time::UtcDateTime::now();
    let ts = created_time.unix_timestamp();
    // Serialize the user payload to JSON, then encrypt with the sealing key.
    let content = serde_json::to_string(&secret.data).unwrap();
    let Secret {
        nonce,
        protected_data,
    } = Secret::encrypt(&content).await.unwrap();
    let nonce = nonce.as_slice();
    // Metadata upsert and version insert must be atomic.
    let mut tx = pool.begin().await.unwrap();
    // Create the metadata row on first write; leave it untouched afterwards.
    let _ = sqlx::query!("
        INSERT INTO kv2_metadata (engine_path, secret_path, cas_required, created_time, max_versions, updated_time)
        VALUES ($1, $2, 0, $3, 100, $3)
        ON CONFLICT(engine_path, secret_path) DO NOTHING;
        ", engine_path, kv_path, ts).execute(&mut *tx).await.unwrap();
    // post_secret.sql computes the next version number (or uses the caller's).
    let res_r = sqlx::query_file!(
        "src/engines/kv/post_secret.sql",
        engine_path,
        kv_path,
        nonce,
        protected_data,
        ts,
        secret.version,
    )
    .fetch_one(&mut *tx)
    .await
    .unwrap();
    tx.commit().await.expect("FAILED TO WRITE TX!");
    let res = KvV2WriteResponse {
        created_time: created_time.into(),
        custom_metadata: None,
        deletion_time: None,
        destroyed: false,
        version: res_r.version_number,
    };
    Ok(Json(res).into_response())
}
/// DELETE handler for `/data/{path}`: soft-deletes the latest live version of
/// the secret by stamping its `deletion_time`. Can be undone with undelete.
/// TODO: also support deleting an explicit list of versions per the API docs.
// https://developer.hashicorp.com/vault/api-docs/secret/kv/kv-v2#delete-latest-version-of-secret
// https://developer.hashicorp.com/vault/api-docs/secret/kv/kv-v2#delete-secret-versions
pub async fn delete_data(
    State(pool): State<DbPool>,
    Path(path): Path<String>,
    Extension(EnginePath(engine_path)): Extension<EnginePath>,
) -> Result<Response, Response> {
    debug!("Secret: {path}, path: {path}");
    let del_time = UtcDateTime::now().unix_timestamp();
    // Select-then-update runs inside one transaction so a concurrent write
    // cannot change the latest version in between.
    let mut tx = pool.begin().await.unwrap();
    // TODO: Find a better way
    let latest_version = sqlx::query!(
        r#"
        SELECT version_number AS latest_version FROM kv2_secret_version
        WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL
        ORDER BY version_number DESC LIMIT 1"#,
        engine_path,
        path,
    )
    .fetch_optional(&mut *tx)
    .await
    .unwrap();
    let latest_version = match latest_version {
        Some(v) => v.latest_version,
        None => {
            // Nothing live to delete -> 404.
            return Err(HttpError::simple(
                StatusCode::NOT_FOUND,
                "No secret version found which could be deleted",
            ));
        }
    };
    let u = sqlx::query!(
        r#"
        UPDATE kv2_secret_version
        SET deletion_time = $4
        WHERE engine_path = $1 AND secret_path = $2
        AND version_number = $3
        "#,
        engine_path,
        path,
        latest_version,
        del_time
    )
    .execute(&mut *tx)
    .await;
    if let Err(e) = u {
        error!(
            "Strange - a version to be deleted has been found but could not be found to set deletion.\n\t{e:?}"
        );
        // Not committed transactions will be aborted upon drop
        // tx.rollback().await.unwrap();
        return Err(HttpError::simple(
            StatusCode::INTERNAL_SERVER_ERROR,
            "A version to be deleted was found but could not be deleted",
        ));
    }
    tx.commit().await.unwrap();
    info!("Secret {path} version {latest_version} of {engine_path} engine deleted! {u:?}");
    Ok(NoContent.into_response())
}
/// PATCH handler for `/data/{path}` — partial update of a secret.
/// Not implemented yet; panics via `todo!()` when reached.
pub async fn patch_data(
    State(pool): State<DbPool>,
    Path(kv_path): Path<String>,
    Extension(EnginePath(engine_path)): Extension<EnginePath>,
    Json(secret): Json<KvV2WriteRequest>,
) -> &'static str {
    todo!("not implemented")
}

View file

@ -1,25 +0,0 @@
-- Scratch query: soft-delete the latest live version of secret 'foo' on the
-- '/kv-v2' engine and report both the deleted version and the new latest one.
-- NOTE(review): writable CTEs (UPDATE inside WITH) may not be supported by
-- the target SQLite version — TODO confirm this statement actually executes.
WITH latest AS (
    SELECT version_number AS version
    FROM kv2_secret_version
    WHERE engine_path = '/kv-v2' AND secret_path = 'foo' AND deletion_time IS NULL
    ORDER BY version_number DESC
    LIMIT 1
),
update_deleted AS (
    UPDATE kv2_secret_version
    SET deletion_time = CURRENT_TIMESTAMP
    WHERE engine_path = '/kv-v2' AND secret_path = 'foo'
    AND version_number = (SELECT version FROM latest)
    RETURNING version_number AS deleted_version
),
new_latest AS (
    SELECT version_number AS new_latest_version
    FROM kv2_secret_version
    WHERE engine_path = '/kv-v2' AND secret_path = 'foo' AND deletion_time IS NULL
    ORDER BY version_number DESC
    LIMIT 1
)
SELECT
    (SELECT deleted_version FROM update_deleted) AS deleted_version,
    (SELECT new_latest_version FROM new_latest) AS new_latest_version;

View file

@ -1,33 +0,0 @@
// There are some placeholder functions, that will have to be implemented before the first release.
// They are marked with `todo!()` to indicate that they need to be implemented.
// We want to keep these functions in the codebase.
// That is why we choose to suppress unused warnings for now.
// TODO
#![allow(unused)]
use crate::storage::DbPool;
use axum::extract::{Path, State};
/// POST `/delete/{path}` — soft-delete specific secret versions. Stub.
pub async fn delete_path() -> &'static str {
    todo!("not implemented")
}
/// POST `/destroy/{path}` — permanently destroy secret versions. Stub.
pub async fn destroy_path() -> &'static str {
    todo!("not implemented")
}
/// GET `/metadata/{path}` — read secret metadata. Stub.
pub async fn get_meta() -> &'static str {
    todo!("not implemented")
}
/// POST `/metadata/{path}` — write secret metadata. Stub.
pub async fn post_meta(
    State(pool): State<DbPool>,
    Path((mount_path, kv_path)): Path<(String, String)>,
    body: String,
) -> &'static str {
    todo!("not implemented")
}
/// DELETE `/metadata/{path}` — delete all metadata and versions. Stub.
pub async fn delete_meta() -> &'static str {
    todo!("not implemented")
}

View file

@ -1,19 +0,0 @@
-- Inserts a new secret version. The version number is the caller-supplied
-- value when given ($6), otherwise the current maximum plus one (1 for the
-- first version of a secret).
WITH latest_version AS (
    SELECT MAX(version_number) AS max_version
    FROM kv2_secret_version
    WHERE engine_path = $1 AND secret_path = $2 -- engine_path AND secret_path
)
INSERT INTO kv2_secret_version (engine_path, secret_path, nonce, encrypted_data, created_time, version_number)
VALUES (
    $1, -- engine_path
    $2, -- secret_path
    $3, -- nonce
    $4, -- encrypted_data
    $5, -- created_time
    CASE -- Use provided version if given
        WHEN $6 IS NOT NULL THEN $6 -- version_number (optional)
        ELSE COALESCE((SELECT max_version FROM latest_version) + 1, 1) -- otherwise 1
    END -- version_number logic
)
RETURNING version_number;

View file

@ -1,123 +0,0 @@
// There are some placeholder functions, that will have to be implemented before the first release.
// They are marked with `todo!()` to indicate that they need to be implemented.
// We want to keep these functions in the codebase.
// That is why we choose to suppress unused warnings for now.
#![allow(unused)]
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, vec};
use time::{OffsetDateTime, UtcDateTime, serde::rfc3339};
// #[derive(Serialize, Deserialize, Debug)]
// pub struct KvSecretData {
// pub secret_data: String,
// #[serde(with = "rfc3339")]
// pub created_time: UtcDateTime,
// #[serde(with = "rfc3339::option")]
// pub deletion_time: Option<UtcDateTime>,
// pub version_number: i64,
// pub secret_path: String,
// }
/// A decrypted secret version as handed to API handlers.
/// Timestamps serialize as RFC 3339 strings.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct KvSecretData {
    /// Decrypted payload as a JSON string (re-parsed by the handlers).
    pub secret_data: String,
    #[serde(with = "rfc3339")]
    pub created_time: OffsetDateTime,
    #[serde(with = "rfc3339::option")]
    pub deletion_time: Option<OffsetDateTime>,
    pub version_number: i64,
    pub secret_path: String,
}
// impl From<KvSecretDataDBO> for KvSecretData {
// fn from(value: KvSecretDataDBO) -> Self {
// Self {
// secret_data: value.secret_data,
// created_time: value.created_time.to_offset(UtcOffset::UTC),
// deletion_time: value.deletion_time.map(|v| v.to_utc()),
// version_number: value.version_number,
// secret_path: value.secret_path,
// }
// }
// }
/// Generic envelope producing the `{"data": ...}` nesting used by the API.
#[derive(serde::Serialize, Deserialize, Debug)]
pub struct Wrapper<T> {
    pub data: T,
}

/// Response body for KV v2 secret reads.
#[derive(Serialize, Deserialize, Debug)]
pub struct KvSecretRes {
    /// Map (required)
    pub data: Wrapper<serde_json::Value>,
    /// Map (optional), may contain `cas` integer
    /// Set the `cas` value to use a Check-And-Set operation
    // #[serde_as(as = "serde_with::EnumMap")]
    pub options: Option<HashMap<String, String>>,
    // Version does not exist for create/update operations
    pub version: Option<i64>,
    // TODO add all fields
}

/// Request body for KV v2 secret writes (`POST`/`PUT /data/{path}`).
#[derive(Deserialize)]
pub struct KvV2WriteRequest {
    /// Arbitrary user payload to store.
    pub data: serde_json::Value,
    pub options: Option<serde_json::Value>,
    /// Explicit version to write; if absent the next version is computed.
    pub version: Option<i32>,
}

/// Response body for KV v2 secret writes; timestamps serialize as RFC 3339.
#[derive(Serialize, Debug)]
pub struct KvV2WriteResponse {
    #[serde(with = "rfc3339")]
    pub created_time: OffsetDateTime,
    pub custom_metadata: Option<HashMap<String, String>>,
    #[serde(with = "rfc3339::option")]
    pub deletion_time: Option<OffsetDateTime>,
    pub destroyed: bool,
    pub version: i64,
}
/// Metadata for a single secret version.
#[derive(Serialize, Deserialize, Debug)]
pub struct VersionMeta {
    pub created_time: UtcDateTime,
    pub deletion_time: Option<UtcDateTime>, // optional deletion time
    pub destroyed: bool,
}

/// Metadata for a whole secret (all versions) in a KV v2 engine.
#[derive(Serialize, Deserialize, Debug)]
pub struct SecretMeta {
    pub cas_required: bool,
    pub created_time: UtcDateTime,
    pub current_version: i64,
    /// In Hashicorp:
    /// If not set, the backend's configured delete_version_after is used.
    /// Cannot be greater than the backend's delete_version_after
    // TODO: implement duration type
    pub delete_version_after: String,
    // TODO https://developer.hashicorp.com/vault/docs/concepts/duration-format
    pub max_versions: i64,
    pub oldest_version: i64,
    pub updated_time: UtcDateTime,
    /// User-provided key-value pairs that are used to describe arbitrary and version-agnostic information about a secret.
    pub custom_metadata: Option<HashMap<String, String>>,
    pub versions: Vec<VersionMeta>,
}
impl Default for SecretMeta {
    /// Metadata for a freshly created secret: version 1, CAS disabled,
    /// a 24-hour deletion window and at most 10 retained versions.
    fn default() -> Self {
        let now = UtcDateTime::now();
        Self {
            cas_required: false,
            created_time: now,
            updated_time: now,
            current_version: 1,
            oldest_version: 1,
            delete_version_after: "24h00m00s".to_string(),
            max_versions: 10,
            custom_metadata: None,
            versions: Vec::new(),
        }
    }
}

View file

@ -1,7 +0,0 @@
use axum::Router;
use crate::storage::DbPool;
/// Builds the (currently empty) identity API router, carrying the shared
/// database pool as router state.
pub fn identity_router(pool: DbPool) -> Router<DbPool> {
    let router = Router::new();
    router.with_state(pool)
}

View file

@ -1,129 +0,0 @@
#![forbid(unsafe_code)]
// // There are some placeholder functions, that will have to be implemented before the first release.
// // They are marked with `todo!()` to indicate that they need to be implemented.
// // We want to keep these functions in the codebase.
// // That is why we choose to suppress unused warnings for now.
// #![allow(unused)]
use crate::common::HttpError;
use axum::{
Router,
extract::Request,
http::StatusCode,
middleware::{self, Next},
response::{IntoResponse, Response},
routing::get,
};
use log::*;
use std::{env, net::SocketAddr, str::FromStr};
use storage::DbPool;
use tokio::{net::TcpListener, signal};
mod auth;
mod common;
mod engines;
mod identity;
mod storage;
mod sys;
/// Entry point: loads configuration from the environment, connects to the
/// database, assembles the HTTP router and serves until SIGINT/SIGTERM.
#[tokio::main]
async fn main() {
    // NOTE: Rethink choice of environment variables in regards to security in the future
    // Best-effort: a missing .env file is not an error.
    let _ = dotenvy::dotenv();
    env_logger::init();
    // Listen on all IPv4 and IPv6 interfaces on port 8200 by default
    let listen_addr = env::var("LISTEN_ADDR").unwrap_or("[::]:8200".to_string());
    let listen_addr = SocketAddr::from_str(&listen_addr).expect("Failed to parse LISTEN_ADDR");
    let db_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
    let pool = storage::create_pool(db_url).await;
    // build our application with routes
    let app = Router::new()
        .route("/", get(root))
        .nest("/v1/auth", auth::auth_router(pool.clone()))
        .nest("/v1/identity", identity::identity_router(pool.clone()))
        .nest("/v1/sys", sys::sys_router(pool.clone()))
        .nest("/v1", engines::secrets_router(pool.clone())) // mountable secret backends
        .fallback(fallback_route_unknown)
        .layer(middleware::from_fn(set_default_content_type_json))
        .with_state(pool.clone());
    // Restore sealing state from the database; if nothing is stored yet,
    // initialize a fresh root key with the default sealing mechanism.
    if !storage::sealing::prepare_unseal(&pool).await {
        storage::sealing::init_default(&pool).await;
    }
    auth::token::create_root_token_if_none_exist(&pool).await;
    warn!("Listening on {listen_addr}");
    // Start listening
    let listener = TcpListener::bind(listen_addr).await.unwrap();
    axum::serve(listener, app)
        .with_graceful_shutdown(shutdown_signal(pool))
        .await
        .unwrap();
}
/// Middleware setting unspecified `Content-Type`s to json since this is done by client libraries.
/// Axum's [axum::extract::Json] rejects extraction attempts without json content type.
async fn set_default_content_type_json(
    mut req: Request,
    next: Next,
) -> Result<impl IntoResponse, Response> {
    let has_content_type = req.headers().contains_key("content-type");
    if !has_content_type {
        req.headers_mut()
            .insert("content-type", "application/json".parse().unwrap());
    }
    Ok(next.run(req).await)
}
/// Resolves once SIGINT (Ctrl+C) or — on Unix — SIGTERM arrives, then closes
/// the database pool so that `axum::serve` can finish its graceful shutdown.
async fn shutdown_signal(pool: DbPool) {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };
    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };
    // On non-Unix targets there is no SIGTERM; this future never resolves.
    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();
    // Wait for whichever signal arrives first.
    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }
    warn!("Closing database pool");
    pool.close().await;
}
/// Fallback route for unknown routes
///
/// Note: `/v1/*` is handled by [`engines::secrets_router`]
async fn fallback_route_unknown(req: Request) -> Response {
log::error!(
"Route not found: {} {}, payload {:?}",
req.method(),
req.uri(),
req.body()
);
HttpError::simple(StatusCode::NOT_FOUND, "Route not implemented")
}
/// Basic handler that responds with a static string
async fn root() -> &'static str {
    let greeting = "Hello, World!";
    info!("Hello world");
    greeting
}

View file

@ -1,41 +0,0 @@
pub mod sealing;
use std::{fs::File, path::Path};
use log::*;
use sqlx::{Pool, Sqlite, sqlite::SqlitePoolOptions};
/// Concrete database backend in use (SQLite).
pub(crate) type DbType = Sqlite;
/// Shared sqlx connection pool handed to every router and handler.
pub(crate) type DbPool = Pool<DbType>;
/// Creates a SQLx SQLite database pool.
/// If nonexistent, it creates a new SQLite file.
///
/// Note: rvault uses compile-time queries.
/// Hence, during development a migrated SQLite file is required.
/// Use `cargo sqlx database reset` if required.
/// Otherwise, set the env var `SQLX_OFFLINE=true` during compilation (not helpful for development).
///
/// # Panics
/// Panics if the file cannot be created, the connection fails, or a
/// migration fails — all fatal during startup.
pub async fn create_pool(db_url: String) -> DbPool {
    // Create SQLite database file if it does not exist
    if db_url.starts_with("sqlite:") && db_url != "sqlite::memory:" {
        // `strip_prefix` removes only the leading scheme; the previous
        // `replace("sqlite:", "")` would also have mangled any later
        // occurrence of "sqlite:" inside the path itself.
        let path = db_url.strip_prefix("sqlite:").expect("prefix checked above");
        if !Path::new(path).exists() {
            warn!("Sqlite database does not exist, creating file {path}");
            File::create(path).expect("Failed to create database file");
        }
    }
    let pool = SqlitePoolOptions::new()
        .max_connections(5)
        .test_before_acquire(true)
        .connect(&db_url)
        .await
        // Previously `.expect(&db_url)` — the panic message was just the URL
        // with no hint of what went wrong.
        .unwrap_or_else(|e| panic!("Failed to connect to database {db_url}: {e}"));
    sqlx::migrate!()
        .run(&pool)
        .await
        .expect("Failed to apply migrations");
    pool
}

View file

@ -1,330 +0,0 @@
#[cfg(feature = "shamir")]
pub mod shamir;
pub mod simple;
use aes_gcm_siv::{
AeadCore, Aes256GcmSiv, KeyInit,
aead::{Aead, OsRng},
};
use log::{error, info, warn};
use simple::SimpleSealing;
use tokio::sync::RwLock;
use super::DbPool;
/// In-memory sealing state of the vault, held in [`ROOT_KEY_MAYBE`].
#[derive(PartialEq)]
enum KeyEnum {
    /// Final key
    MainKey(Vec<u8>),
    /// Encrypted with single secret (protected_rk, nonce)
    Simple(SimpleSealing),
    #[cfg(feature = "shamir")]
    // Shamir's Secret Sharing
    Shamir(shamir::ShamirBucket),
    /// Unknown or not initialized
    Uninitialized,
}
/// Common interface of sealing backends that hold an encrypted root key.
trait Sealing {
    /// Creates a backend from the protected root key and its AEAD nonce.
    fn new(protected_rk: Vec<u8>, nonce: Vec<u8>) -> Self;
    /// Processes one user-provided key (or key share); on success returns
    /// [`UnsealResult::DoneConfidential`] carrying the recovered root key.
    async fn unseal(&mut self, key: String) -> UnsealResult;
}
/// Row shape of the `root_key` table as read by [`prepare_unseal`].
struct ProtectedRK {
    // "dev_only", "simple" or "shamir" (see the match in `prepare_unseal`)
    pub protection_type: String,
    pub encrypted_key: Vec<u8>,
    // AEAD nonce used to protect the key, where applicable
    pub nonce: Option<Vec<u8>>,
}
/// Global sealing state; starts [`KeyEnum::Uninitialized`] until
/// [`prepare_unseal`] restores it from the database.
static ROOT_KEY_MAYBE: RwLock<KeyEnum> = RwLock::const_new(KeyEnum::Uninitialized);
/// Returns `true` if vault is initialized or unsealed.
/// Returns `false` if uninitialized (nothing in the database).
///
/// Loads the protected root key from the database and installs the matching
/// sealing backend into [`ROOT_KEY_MAYBE`], so that subsequent
/// [`provide_key`] calls can perform the actual unseal.
pub async fn prepare_unseal(pool: &DbPool) -> bool {
    {
        if !matches!(*ROOT_KEY_MAYBE.read().await, KeyEnum::Uninitialized) {
            info!("Vault unseal is already prepared");
            return true;
        }
    }
    // Creating the write() future here does NOT acquire the lock yet —
    // acquisition only happens at the `.await` below, after the DB query.
    // NOTE(review): between the read check above and that `.await` another
    // task could alter the state; confirm overwriting it here is acceptable.
    let lock = ROOT_KEY_MAYBE.write(); // Not awaited just here
    let rk = sqlx::query_as!(
        ProtectedRK,
        "SELECT encrypted_key, type as protection_type, nonce FROM root_key ORDER BY version LIMIT 1"
    )
    .fetch_optional(pool)
    .await
    .expect("Failed to optionally read root key from the database");
    let v = match rk {
        Some(v) => v,
        None => {
            warn!("No root key was found in the database!");
            return false;
        }
    };
    info!(
        "Root key of type {} found in the database",
        v.protection_type
    );
    let mut lock = lock.await;
    // Every supported protection type stores a nonce row (dev_only uses b"").
    let nonce = v.nonce.expect("Simple encryption but the nonce is missing");
    let res = match &*v.protection_type {
        #[cfg(feature = "insecure-dev-sealing")]
        "dev_only" => {
            warn!(
                "Root key is of type {}. This is INSECURE and must only be used for development purposes!",
                v.protection_type
            );
            // Key is stored in plaintext: usable immediately, no unseal step.
            KeyEnum::MainKey(v.encrypted_key)
        }
        #[cfg(not(feature = "insecure-dev-sealing"))]
        "dev_only" => panic!(
            r#"Database is insecure but "insecure-dev-sealing" is not enabled for this build!"#
        ),
        "simple" => KeyEnum::Simple(SimpleSealing::new(v.encrypted_key, nonce)),
        #[cfg(feature = "shamir")]
        "shamir" => KeyEnum::Shamir(shamir::ShamirBucket::new(v.encrypted_key, nonce)),
        #[cfg(not(feature = "shamir"))]
        "shamir" => panic!(r#"Feature "shamir" is not enabled for this build!"#),
        _ => panic!("Unknown root key type in database"),
    };
    *lock = res;
    true
}
/// Must NOT be used in production.
/// Token is plainly stored in the database and will be unsealed directly by [prepare_unseal]!
/// Danger!
#[cfg(feature = "insecure-dev-sealing")]
pub async fn init_insecure_in_db(pool: &DbPool) {
    warn!(
        "Danger: INSECURE! Generated root key is stored plainly in the database. Must ONLY be used for development!"
    );
    let generated = Aes256GcmSiv::generate_key(&mut OsRng);
    let plain_key = generated.as_slice().to_owned();
    write_new_root_key(pool, plain_key, "dev_only", Some(b"")).await;
}
/// Persists a (protected) root key as version 1 in the `root_key` table.
///
/// `type_to_be` selects the sealing backend on the next [`prepare_unseal`]
/// ("dev_only", "simple" or "shamir"); `nonce` is the AEAD nonce the key was
/// protected with, where applicable.
async fn write_new_root_key(
    pool: &DbPool,
    protected_key: Vec<u8>,
    type_to_be: &str,
    nonce: Option<&[u8]>,
) {
    // Query result (rows affected) is intentionally discarded; failure panics.
    let _ = sqlx::query!(
        "
        INSERT INTO root_key (encrypted_key, type, version, nonce)
        VALUES ($1, $2, 1, $3)
        ",
        protected_key,
        type_to_be,
        nonce
    )
    .execute(pool)
    .await
    .expect("Failed to write new root key to the database");
    info!("Initialized new root key!");
}
/// Seals the vault again: forgets the in-memory key state, then restores the
/// sealed (locked) state from the database.
pub async fn reseal(pool: &DbPool) {
    // Scope the write guard so `prepare_unseal` can take the lock itself.
    {
        *ROOT_KEY_MAYBE.write().await = KeyEnum::Uninitialized;
    }
    prepare_unseal(pool).await;
}
// pub async fn sealing_status() {
// let lock = ROOT_KEY_MAYBE.read().await;
// match &*lock {
// KeyEnum::MainKey(_) => todo!(),
// KeyEnum::Simple(_, _) => todo!(),
// KeyEnum::Uninitialized => todo!(),
// KeyEnum::Shamir(_, _) => todo!(),
// }
// }
/// Feeds one user-provided unseal key (or key share) into the sealing state.
///
/// Takes only the read lock to cheaply reject calls that cannot make
/// progress; otherwise hands the key to the active sealing backend under the
/// write lock and, on success, replaces the sealed state with the recovered
/// root key.
pub async fn provide_key(key: String) -> UnsealResult {
    // First, check if we need to write-lock at all
    {
        let read_lock = ROOT_KEY_MAYBE.read().await;
        if matches!(*read_lock, KeyEnum::MainKey(_)) {
            info!("Providing keys is useless since vault is already unlocked");
            return UnsealResult::AlreadyDone;
        } else if matches!(*read_lock, KeyEnum::Uninitialized) {
            error!("Cannot process provided key when the vault is uninitialized");
            return UnsealResult::Uninitialized;
        }
    }
    // A write lock is necessary.
    let mut write_lock = ROOT_KEY_MAYBE.write().await;
    // Re-check under the write lock: another task may have completed the
    // unseal (or a reseal may have reset the state) between dropping the read
    // lock above and acquiring the write lock. The previous version declared
    // these arms `unreachable!()` and would have panicked in that race.
    let rk = match &mut *write_lock {
        KeyEnum::MainKey(_) => return UnsealResult::AlreadyDone,
        KeyEnum::Uninitialized => return UnsealResult::Uninitialized,
        KeyEnum::Simple(simple) => simple.unseal(key).await,
        #[cfg(feature = "shamir")]
        KeyEnum::Shamir(shamir) => shamir.unseal(key).await,
    };
    let rk = match rk {
        UnsealResult::DoneConfidential(rk) => rk,
        // Backends never return plain `Done`; they either hand back the key
        // material or a rejection variant.
        UnsealResult::Done => unreachable!(),
        reject_action => return reject_action,
    };
    *write_lock = KeyEnum::MainKey(rk);
    info!("Unsealing done; Vault ready");
    UnsealResult::Done
}
/// An AEAD-protected blob together with the nonce it was encrypted under.
pub struct Secret {
    /// 96-bit AES-GCM-SIV nonce, unique per encrypted message.
    pub nonce: [u8; 12],
    /// Ciphertext produced by [`Secret::encrypt`] (or loaded from storage).
    pub protected_data: Vec<u8>,
}
impl Secret {
pub fn new<D, N>(data: D, nonce: N) -> Self
where
D: Into<Vec<u8>>,
N: AsRef<[u8]>,
{
let nonce_slice = nonce.as_ref();
assert!(
nonce_slice.len() == 12,
"Nonce must be exactly 12 bytes long"
);
let nonce: &[u8; 12] = nonce_slice.try_into().expect("Nonce must be 12 bytes long");
Self {
nonce: *nonce,
protected_data: data.into(),
}
}
/// Encrypt a secret
///
/// # Errors
///
/// This function will return an error if the vault is uninitialized or an unknown error occurs.
pub async fn encrypt(data: &String) -> Result<Self, ()> {
let cipher = if let KeyEnum::MainKey(key) = &*ROOT_KEY_MAYBE.read().await {
match Aes256GcmSiv::new_from_slice(key) {
Ok(v) => v,
Err(e) => {
error!("Failed to create new AesGcmSiv cipher from variable size key: {e}");
return Err(());
}
}
} else {
error!("Cannot encrypt secret since the vault is not unsealed");
return Err(());
};
let nonce: aes_gcm_siv::aead::generic_array::GenericArray<
u8,
<Aes256GcmSiv as aes_gcm_siv::AeadCore>::NonceSize,
> = Aes256GcmSiv::generate_nonce(&mut OsRng); // 96-bits; unique per message
let enc = match cipher.encrypt(&nonce, data.as_bytes()) {
Ok(v) => v,
Err(e) => {
error!("Failed to encrypt secret with cipher: {e}");
return Err(());
}
};
debug_assert!(nonce.len() == 12, "Nonce should be exactly 12 bytes");
let nonce = match nonce.as_slice().try_into() {
Ok(v) => v,
Err(e) => {
error!("Nonce should be exactly 12 bytes: {e}");
return Err(());
}
};
Ok(Self {
nonce,
protected_data: enc,
})
}
pub async fn decrypt_bytes(self) -> Result<Vec<u8>, ()> {
assert!(self.nonce.len() == 12);
let cipher = match &*ROOT_KEY_MAYBE.read().await {
KeyEnum::MainKey(key) => Aes256GcmSiv::new_from_slice(key),
_ => panic!("Cannot seal secret since the vault is not unsealed"),
}
.expect("Failed to create new AesGcmSiv cipher from variable size key");
let nonce = aes_gcm_siv::aead::generic_array::GenericArray::from_slice(&self.nonce);
let enc = match cipher.decrypt(nonce, self.protected_data.as_ref()) {
Ok(v) => v,
Err(e) => {
error!("Failed to decrypt secret with given nonce and cipher: {e}");
return Err(());
}
};
Ok(enc)
}
pub async fn decrypt(self) -> Result<String, ()> {
String::from_utf8(self.decrypt_bytes().await?).map_err(|e| {
error!("Failed to parse secret as UTF8: {e}");
})
}
}
/// Outcome of an unseal-related operation ([`provide_key`] or a backend's
/// [`Sealing::unseal`]).
pub enum UnsealResult {
    /// Unsealing finished, with root key hidden
    Done,
    /// Was already unsealed, no action taken
    AlreadyDone,
    /// Could not unseal as the vault is uninitialized
    Uninitialized,
    /// Unsealing finished, returns root key
    DoneConfidential(Vec<u8>),
    /// Unsealing attempt has been recorded but is not sufficient
    Unfinished,
    /// The provided or the set of previously provided portions are invalid.
    /// Unsealing has been reset.
    InvalidReset,
    /// Duplicate share
    Duplicate,
    /// Error processing share, invalid
    InvalidRejected,
}
/// Initializes a brand-new vault with a freshly generated root key and
/// immediately prepares it for unsealing.
///
/// The sealing mechanism is chosen at compile time via cargo features:
/// `insecure-dev-sealing`, else `shamir`, else the simple password scheme.
pub async fn init_default(pool: &DbPool) {
    #[cfg(feature = "insecure-dev-sealing")]
    let user_key = {
        // Fixed: this code lives in module `storage::sealing` itself, so the
        // previous `storage::sealing::init_insecure_in_db(&pool)` path did
        // not resolve here (and `&pool` was a needless double reference).
        init_insecure_in_db(pool).await;
        "INSECURE automatic unlock - TESTING ONLY"
    };
    #[cfg(not(feature = "insecure-dev-sealing"))]
    let user_key = {
        #[cfg(not(feature = "shamir"))]
        {
            simple::init_simple(pool).await
        }
        #[cfg(feature = "shamir")]
        {
            shamir::init_shamir(pool, 2, 5).await
        }
    };
    let success = prepare_unseal(pool).await;
    warn!("New sealing password generated: {user_key:?}");
    assert!(
        success,
        "Vault ought to have been initialized just now but it is not."
    );
}

View file

@ -1,225 +0,0 @@
use aes_gcm_siv::{
AeadCore, Aes256GcmSiv, KeyInit,
aead::{Aead, OsRng, generic_array::GenericArray},
};
use base64::{Engine, prelude::BASE64_STANDARD};
use log::{error, info, warn};
use p256::{NonZeroScalar, Scalar, SecretKey};
use serde::{Deserialize, Serialize};
use serde_json::json;
use vsss_rs::{
DefaultShare, Error as VsssErr, IdentifierPrimeField, ReadableShareSet, ShareElement,
ValuePrimeField,
};
use zeroize::ZeroizeOnDrop;
use crate::DbPool;
use super::{Sealing, UnsealResult, write_new_root_key};
type P256Share = DefaultShare<IdentifierPrimeField<Scalar>, IdentifierPrimeField<Scalar>>;
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize, ZeroizeOnDrop)]
/// Differs from [P256Share] by containing raw byte vectors, which makes the
/// portion serializable; zeroized on drop since it is key material.
struct ShamirPortion {
    /// Share identifier (x-coordinate) as raw scalar bytes.
    #[serde(rename = "i")]
    pub identifier: Vec<u8>,
    /// Share value (y-coordinate) as raw scalar bytes.
    #[serde(rename = "v")]
    pub value: Vec<u8>,
}
#[derive(PartialEq)]
/// Container for multiple [ShamirPortion]s and the protected root key.
/// Multiple instances could exist in the future for per-namespace encryption.
pub struct ShamirBucket {
    // Shares collected so far during the current unseal attempt
    portions: Vec<ShamirPortion>,
    // Root key, AEAD-encrypted with the user key the shares reconstruct
    protected_rk: Vec<u8>,
    // 12-byte AEAD nonce used when the root key was encrypted
    nonce: Vec<u8>,
}
impl Sealing for ShamirBucket {
    fn new(protected_rk: Vec<u8>, nonce: Vec<u8>) -> Self {
        Self {
            portions: Vec::with_capacity(2),
            protected_rk,
            nonce,
        }
    }
    /// Accepts one base64-encoded JSON share. Shares are collected until the
    /// threshold is reached; then the user key is reconstructed and used to
    /// decrypt the root key.
    ///
    /// All inputs are user-supplied: every malformed or mismatching share is
    /// rejected with an [`UnsealResult`] variant — never a panic.
    async fn unseal(&mut self, key: String) -> UnsealResult {
        // Shares travel as base64(JSON{ "i": ..., "v": ... }).
        let key = match BASE64_STANDARD.decode(key) {
            Ok(v) => v,
            Err(e) => {
                warn!("Portion could not be decoded: {e}");
                return UnsealResult::InvalidRejected;
            }
        };
        let key_portion: ShamirPortion = match serde_json::from_slice(&key) {
            Ok(v) => v,
            Err(e) => {
                info!("Portion could not be parsed: {e}");
                return UnsealResult::InvalidRejected;
            }
        };
        if self.portions.contains(&key_portion) {
            warn!("The supplied Shamir portion is already known. Duplication ignored.");
            return UnsealResult::Duplicate;
        }
        self.portions.push(key_portion);
        let joined_keys = match join_keys(&self.portions) {
            Ok(v) => v,
            Err(e) => {
                return match e {
                    VsssErr::SharingMinThreshold => {
                        info!("Shamir portion provided. Sharing threshold not reached.");
                        UnsealResult::Unfinished
                    }
                    VsssErr::SharingDuplicateIdentifier => {
                        // Reachable with user input: the exact-duplicate check
                        // above does not catch two DIFFERENT portions sharing
                        // the same identifier. The previous version declared
                        // this `unreachable!()` and panicked the server.
                        self.portions.pop();
                        warn!("Portion with a duplicate identifier rejected");
                        UnsealResult::InvalidRejected
                    }
                    e => {
                        // Defensive: any other joining error invalidates the
                        // collected set instead of panicking.
                        error!("Unknown error occurred upon joining keys {e:?}");
                        self.portions.clear();
                        UnsealResult::InvalidReset
                    }
                };
            }
        }
        .to_bytes();
        // The reconstructed scalar doubles as the AES-256 user key.
        let cipher = match Aes256GcmSiv::new_from_slice(&joined_keys) {
            Ok(v) => v,
            Err(e) => {
                info!("Cipher could not be created from slice: {e}");
                return UnsealResult::InvalidRejected;
            }
        };
        debug_assert_eq!(self.nonce.len(), 12);
        let nonce = aes_gcm_siv::aead::generic_array::GenericArray::from_slice(&self.nonce);
        let root_key = cipher.decrypt(nonce, self.protected_rk.as_ref());
        match root_key {
            Ok(v) => UnsealResult::DoneConfidential(v),
            Err(_) => {
                // Err is opaque on purpose
                self.portions.clear();
                warn!(
                    "Enough shares have been provided but the set of shares is invalid. The set of shares has been reset."
                );
                UnsealResult::InvalidReset
            }
        }
    }
}
/// Shamir Secret Sharing does not verify a portion for validity,
/// unlike Feldman Verified Secret Sharing, which is built on Shamir.
/// "Validation" happens by attempting to decrypt the root key.
///
/// Generates a fresh root key, encrypts it with a random user key and splits
/// that user key into `limit` shares, of which `threshold` are required.
///
/// # Returns
/// List of encoded key portions
pub async fn init_shamir(pool: &DbPool, threshold: usize, limit: usize) -> Vec<String> {
    let root_key = Aes256GcmSiv::generate_key(&mut OsRng);
    let nonce: GenericArray<u8, <Aes256GcmSiv as AeadCore>::NonceSize> =
        Aes256GcmSiv::generate_nonce(&mut OsRng); // 96-bits; unique per message
    let root_key = root_key.as_slice().to_owned();
    // Encrypt the root key with a fresh, intermediate "user key"; only the
    // user key is split into shares, never the root key itself.
    let (user_key, protected_rk) = {
        let key = Aes256GcmSiv::generate_key(&mut OsRng);
        let cipher = Aes256GcmSiv::new(&key);
        let nonce: &[u8] = nonce.as_slice();
        debug_assert_eq!(nonce.len(), 12);
        let nonce = aes_gcm_siv::aead::generic_array::GenericArray::from_slice(nonce);
        let enc = cipher.encrypt(nonce, root_key.as_slice()).unwrap();
        (key, enc)
    };
    let portions = share_keys(&mut OsRng, threshold, limit, &user_key);
    // NOTE(review): debug-logging the shares leaks key material into logs;
    // confirm debug level is never enabled in production.
    log::debug!("Shared Keys: {portions:?}");
    write_new_root_key(pool, protected_rk, "shamir", Some(nonce.as_slice())).await;
    portions
}
/// Returns a Vec of Base64 encoded JSON-wrapped identifier-value pairs
///
/// Splits `root_key` into `limit` Shamir shares over the P-256 scalar field,
/// any `threshold` of which suffice to reconstruct it.
///
/// # Panics
/// Panics if `threshold > limit`, if the key bytes do not form a valid
/// non-zero P-256 scalar, or if the split itself fails.
fn share_keys(
    mut osrng: &mut OsRng,
    threshold: usize,
    limit: usize,
    root_key: &[u8],
) -> Vec<String> {
    // NOTE(review): debug-logging the raw key leaks key material into logs;
    // confirm debug level is never enabled in production.
    log::debug!("RK: {root_key:?}");
    assert!(
        threshold <= limit,
        "Threshold cannot be higher than the number of shares (limit)"
    );
    // Interpret the 32 key bytes as a non-zero scalar in the P-256 field.
    let rk_array = GenericArray::from_slice(root_key);
    let rk_scalar = NonZeroScalar::from_repr(*rk_array).unwrap();
    let shared_secret = IdentifierPrimeField(*rk_scalar.as_ref());
    let res =
        vsss_rs::shamir::split_secret::<P256Share>(threshold, limit, &shared_secret, &mut osrng);
    // Each share is wrapped as JSON { "i": ..., "v": ... } and base64-encoded
    // for transport; `ShamirBucket::unseal` reverses this exactly.
    res.unwrap()
        .iter()
        .map(|f| {
            BASE64_STANDARD.encode(
                json!(ShamirPortion {
                    identifier: f.identifier.to_vec(),
                    value: f.value.to_vec(),
                })
                .to_string(),
            )
        })
        .collect()
}
/// Recombines Shamir shares into the secret key they were split from.
///
/// # Errors
/// Returns [`VsssErr::InvalidShare`] if a portion's bytes do not form valid
/// field elements or the combined scalar is zero; propagates combination
/// errors (e.g. threshold not reached) from `vsss_rs`.
fn join_keys(shares: &[ShamirPortion]) -> Result<SecretKey, vsss_rs::Error> {
    // Convert the raw byte portions back into typed P-256 shares.
    let shares: Vec<P256Share> = shares
        .iter()
        .map(|portion| {
            let identifier = IdentifierPrimeField::<Scalar>::from_slice(&portion.identifier)
                .map_err(|e| {
                    info!("Portion could not be converted to IdentifierPrimeField: {e}");
                    VsssErr::InvalidShare
                })?;
            let value = ValuePrimeField::<Scalar>::from_slice(&portion.value).map_err(|e| {
                info!("Portion could not be converted to ValuePrimeField: {e}");
                VsssErr::InvalidShare
            })?;
            Ok(P256Share { identifier, value })
        })
        .collect::<Result<_, VsssErr>>()?;
    let scalar = shares.combine()?;
    // A little suboptimal thanks to CtOption
    let nzs = match NonZeroScalar::from_repr(scalar.0.into()).into_option() {
        Some(v) => v,
        None => return Err(VsssErr::InvalidShare),
    };
    let sk = SecretKey::from(nzs);
    Ok(sk)
}
/// Round-trip test: splitting a key into 5 shares (threshold 2) and
/// recombining them must reproduce the original key.
#[test]
fn split_and_join() {
    let root_key = Aes256GcmSiv::generate_key(&mut OsRng);
    let root_key = root_key.as_slice().to_owned();
    let kps = share_keys(&mut OsRng, 2, 5, &root_key);
    // Undo the transport encoding (base64 + JSON) of every share.
    let kps: Vec<_> = kps
        .iter()
        .map(|f| {
            let b = BASE64_STANDARD
                .decode(f)
                .expect("A portion could not be decoded from BASE64");
            serde_json::from_slice(&b).expect("A portion could not be parsed as a key pair")
        })
        .collect();
    let k = join_keys(&kps).expect("Error on joining key pairs");
    assert_eq!(
        root_key,
        k.to_bytes().as_slice(),
        "Original key and re-combined key from shares are not equal"
    );
}

View file

@ -1,48 +0,0 @@
use aes_gcm_siv::{
AeadCore, Aes256GcmSiv, KeyInit,
aead::{Aead, OsRng, generic_array::GenericArray},
};
use base64::{Engine, prelude::BASE64_STANDARD};
use crate::DbPool;
use super::{Sealing, UnsealResult, write_new_root_key};
/// Pair of protected root key and nonce:
/// `.0` is the AEAD-encrypted root key, `.1` the 12-byte nonce it was
/// encrypted with (length asserted in `unseal`).
#[derive(PartialEq)]
pub struct SimpleSealing(Vec<u8>, Vec<u8>);
impl Sealing for SimpleSealing {
    fn new(protected_rk: Vec<u8>, nonce: Vec<u8>) -> Self {
        Self(protected_rk, nonce)
    }
    /// Attempts to unseal with a base64-encoded AES-256 key.
    ///
    /// `key` is user-supplied: bad base64, a wrong key length or a wrong key
    /// are rejected with [`UnsealResult::InvalidRejected`]. The previous
    /// version `unwrap()`ed all three cases, so any bad unseal attempt
    /// panicked the server.
    async fn unseal(&mut self, key: String) -> UnsealResult {
        let key = match BASE64_STANDARD.decode(key) {
            Ok(v) => v,
            // Not valid base64 — cannot possibly be the right key.
            Err(_) => return UnsealResult::InvalidRejected,
        };
        let cipher = match Aes256GcmSiv::new_from_slice(&key) {
            Ok(v) => v,
            // Decoded bytes are not a valid AES-256 key length.
            Err(_) => return UnsealResult::InvalidRejected,
        };
        debug_assert_eq!(self.1.len(), 12);
        let nonce = aes_gcm_siv::aead::generic_array::GenericArray::from_slice(self.1.as_slice());
        match cipher.decrypt(nonce, self.0.as_ref()) {
            Ok(rk) => UnsealResult::DoneConfidential(rk),
            // AEAD errors are opaque by design: the key simply did not fit.
            Err(_) => UnsealResult::InvalidRejected,
        }
    }
}
/// Initialize the vault with a simple password
///
/// Generates a fresh root key, encrypts it with a freshly generated user key
/// (AES-256-GCM-SIV) and persists the protected key plus nonce.
///
/// Returns the base64-encoded user key; it is required for every subsequent
/// unseal and is not stored anywhere.
#[allow(unused)]
pub async fn init_simple(pool: &DbPool) -> String {
    let root_key = Aes256GcmSiv::generate_key(&mut OsRng);
    let nonce: GenericArray<u8, <Aes256GcmSiv as AeadCore>::NonceSize> =
        Aes256GcmSiv::generate_nonce(&mut OsRng); // 96-bits; unique per message
    let root_key = root_key.as_slice().to_owned();
    // Only the ciphertext of the root key is stored; the user key never is.
    let (user_key, protected_rk) = {
        let key = Aes256GcmSiv::generate_key(&mut OsRng);
        let cipher = Aes256GcmSiv::new(&key);
        let nonce: &[u8] = nonce.as_slice();
        debug_assert_eq!(nonce.len(), 12);
        let nonce = aes_gcm_siv::aead::generic_array::GenericArray::from_slice(nonce);
        let enc = cipher.encrypt(nonce, root_key.as_slice()).unwrap();
        (key, enc)
    };
    write_new_root_key(pool, protected_rk, "simple", Some(nonce.as_slice())).await;
    BASE64_STANDARD.encode(user_key)
}

View file

@ -1,16 +0,0 @@
mod root_generation;
mod sealing;
use axum::Router;
use root_generation::root_generation;
use sealing::sealing_routes;
use crate::storage::DbPool;
/// System routes
///
/// Aggregates all `/v1/sys/*` sub-routers and attaches the shared pool.
pub fn sys_router(pool: DbPool) -> Router<DbPool> {
    let routes = Router::new()
        .merge(root_generation())
        .merge(sealing_routes());
    routes.with_state(pool)
}

View file

@ -1,14 +0,0 @@
use axum::{Router, routing::post};
use crate::DbPool;
pub fn root_generation() -> Router<DbPool> {
Router::new()
// .route("/generate-root", get(get_root_generation_attempt))
// .route("/generate-root", delete(cancel_generate_root))
.route("/generate-root", post(generate_new_root))
}
/// Handler stub for `POST /generate-root`; root token generation is not yet
/// implemented.
async fn generate_new_root() {
    todo!()
}

View file

@ -1,54 +0,0 @@
use axum::{
Json, Router,
extract::State,
routing::{get, post, put},
};
use log::warn;
use serde::Deserialize;
use crate::storage::{DbPool, sealing};
/// Routes for seal management: `/seal`, `/seal-status` and `/unseal`.
pub fn sealing_routes() -> Router<DbPool> {
    Router::new()
        .route("/seal-status", get(seal_status_get))
        .route("/seal", post(seal_post))
        .route("/unseal", post(unseal_post))
        // HashiCorp documents unseal as POST, but clients actually issue a
        // PUT — accept both verbs.
        .route("/unseal", put(unseal_post))
}
/// `POST /seal`: drops the in-memory root key and restores the sealed state
/// from the database via [`sealing::reseal`].
async fn seal_post(State(pool): State<DbPool>) {
    sealing::reseal(&pool).await;
}
/// Body of an unseal request, mirroring HashiCorp's `sys/unseal` API.
#[derive(Deserialize)]
struct UnsealRequest {
    /// Required, unless `reset` is true
    pub key: Option<String>,
    #[serde(default)]
    /// Specifies if previously-provided unseal keys are discarded and the unseal process is reset.
    pub reset: bool,
    // #[serde(default)]
    // /// Used to migrate the seal from shamir to autoseal or autoseal to shamir. Must be provided on all unseal key calls.
    // pub migrate: bool,
}
/// `POST`/`PUT /unseal`: optionally resets unseal progress, then feeds the
/// provided key (share) into the sealing backend.
// NOTE(review): the `UnsealResult` from `provide_key` is discarded, so even a
// rejected key yields `Ok(())`; consider surfacing the outcome (and a proper
// seal-status body, as HashiCorp does) to the client.
async fn unseal_post(State(pool): State<DbPool>, Json(req): Json<UnsealRequest>) -> Result<(), ()> {
    if req.reset {
        warn!("Unsealing progress has been reset on unseal request");
        sealing::reseal(&pool).await;
    }
    if let Some(key) = req.key {
        sealing::provide_key(key).await;
    } else if !req.reset {
        // No request key nor reset = bad request
        return Err(());
    }
    Ok(())
}
/// Handler stub for `GET /seal-status`; reporting the seal status is not yet
/// implemented.
async fn seal_status_get(State(_pool): State<DbPool>) -> &'static str {
    todo!("not implemented")
}