Compare commits
24 commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 16aada55c5 | |||
| ba1a5f728c | |||
| ed715102c0 | |||
| 69b741fe13 | |||
| 623cc2bbaa | |||
| 2b47bb113e | |||
| 47f8e01210 | |||
| 1ac49dbb60 | |||
| 806ad1343b | |||
| b3ddae6008 | |||
| 5a10a8d4b1 | |||
| 14012b155e | |||
| 27dcc5489d | |||
| ed2620c8b8 | |||
| d77237aefe | |||
| 6eb02c8412 | |||
| 5de9e1d74e | |||
| 88ed714e22 | |||
| 4d342e8b99 | |||
| 1accd45648 | |||
| 7949d64649 | |||
| 1fe5d73483 | |||
| 491ca2fd54 | |||
| b5e086bd0a |
57 changed files with 14979 additions and 1595 deletions
5
.gitignore
vendored
5
.gitignore
vendored
|
|
@ -7,6 +7,5 @@
|
||||||
*.pdf
|
*.pdf
|
||||||
target/
|
target/
|
||||||
go_client/openapi.json
|
go_client/openapi.json
|
||||||
crates/storage-sled/sled_db
|
|
||||||
test.db
|
*.db*
|
||||||
src/storage/database.db
|
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,12 @@
|
||||||
|
{
|
||||||
|
"db_name": "SQLite",
|
||||||
|
"query": "\n UPDATE kv2_secret_version\n SET deletion_time = $4\n WHERE engine_path = $1 AND secret_path = $2\n AND version_number = $3\n ",
|
||||||
|
"describe": {
|
||||||
|
"columns": [],
|
||||||
|
"parameters": {
|
||||||
|
"Right": 4
|
||||||
|
},
|
||||||
|
"nullable": []
|
||||||
|
},
|
||||||
|
"hash": "047ebbce6fa0073cc810b189e8db3ff5e4eb347f1c1d9e5408220411a9e08b00"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,44 @@
|
||||||
|
{
|
||||||
|
"db_name": "SQLite",
|
||||||
|
"query": "SELECT service_token.* FROM service_token, service_token_role_membership\n WHERE service_token.id = service_token_role_membership.token_id AND\n service_token_role_membership.role_name = 'root'\n LIMIT 1",
|
||||||
|
"describe": {
|
||||||
|
"columns": [
|
||||||
|
{
|
||||||
|
"name": "id",
|
||||||
|
"ordinal": 0,
|
||||||
|
"type_info": "Text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "key",
|
||||||
|
"ordinal": 1,
|
||||||
|
"type_info": "Text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "expiry",
|
||||||
|
"ordinal": 2,
|
||||||
|
"type_info": "Integer"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "parent_id",
|
||||||
|
"ordinal": 3,
|
||||||
|
"type_info": "Text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "identity_id",
|
||||||
|
"ordinal": 4,
|
||||||
|
"type_info": "Text"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"parameters": {
|
||||||
|
"Right": 0
|
||||||
|
},
|
||||||
|
"nullable": [
|
||||||
|
false,
|
||||||
|
false,
|
||||||
|
true,
|
||||||
|
true,
|
||||||
|
true
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"hash": "0aa5c76c9ea1692da29a0f39998946d230f92a8f252294b25afeabe05749f4ca"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,44 @@
|
||||||
|
{
|
||||||
|
"db_name": "SQLite",
|
||||||
|
"query": "SELECT * FROM 'service_token' WHERE key = $1 AND (expiry IS NULL OR expiry > $2) LIMIT 1",
|
||||||
|
"describe": {
|
||||||
|
"columns": [
|
||||||
|
{
|
||||||
|
"name": "id",
|
||||||
|
"ordinal": 0,
|
||||||
|
"type_info": "Text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "key",
|
||||||
|
"ordinal": 1,
|
||||||
|
"type_info": "Text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "expiry",
|
||||||
|
"ordinal": 2,
|
||||||
|
"type_info": "Integer"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "parent_id",
|
||||||
|
"ordinal": 3,
|
||||||
|
"type_info": "Text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "identity_id",
|
||||||
|
"ordinal": 4,
|
||||||
|
"type_info": "Text"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"parameters": {
|
||||||
|
"Right": 2
|
||||||
|
},
|
||||||
|
"nullable": [
|
||||||
|
false,
|
||||||
|
false,
|
||||||
|
true,
|
||||||
|
true,
|
||||||
|
true
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"hash": "2cbe2fbcd5d8fb6d489f9e3cc7e04182f226964ea9d84219abbe6958dcccfefe"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,26 @@
|
||||||
|
{
|
||||||
|
"db_name": "SQLite",
|
||||||
|
"query": "SELECT * FROM 'service_token_role_membership' WHERE token_id = $1",
|
||||||
|
"describe": {
|
||||||
|
"columns": [
|
||||||
|
{
|
||||||
|
"name": "role_name",
|
||||||
|
"ordinal": 0,
|
||||||
|
"type_info": "Text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "token_id",
|
||||||
|
"ordinal": 1,
|
||||||
|
"type_info": "Text"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"parameters": {
|
||||||
|
"Right": 1
|
||||||
|
},
|
||||||
|
"nullable": [
|
||||||
|
false,
|
||||||
|
false
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"hash": "36485bb70f499346cd1be569887ea8b6f438f4f845ef883e80d58875b839500a"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,20 @@
|
||||||
|
{
|
||||||
|
"db_name": "SQLite",
|
||||||
|
"query": "\n SELECT version_number AS latest_version FROM kv2_secret_version\n WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n ORDER BY version_number DESC LIMIT 1",
|
||||||
|
"describe": {
|
||||||
|
"columns": [
|
||||||
|
{
|
||||||
|
"name": "latest_version",
|
||||||
|
"ordinal": 0,
|
||||||
|
"type_info": "Integer"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"parameters": {
|
||||||
|
"Right": 2
|
||||||
|
},
|
||||||
|
"nullable": [
|
||||||
|
false
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"hash": "414c74a3c017bde424fe44bbc251fea384b0dbedd1541900d147e0814c1f33d8"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,32 @@
|
||||||
|
{
|
||||||
|
"db_name": "SQLite",
|
||||||
|
"query": "SELECT encrypted_key, type as protection_type, nonce FROM root_key ORDER BY version LIMIT 1",
|
||||||
|
"describe": {
|
||||||
|
"columns": [
|
||||||
|
{
|
||||||
|
"name": "encrypted_key",
|
||||||
|
"ordinal": 0,
|
||||||
|
"type_info": "Blob"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "protection_type",
|
||||||
|
"ordinal": 1,
|
||||||
|
"type_info": "Text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "nonce",
|
||||||
|
"ordinal": 2,
|
||||||
|
"type_info": "Blob"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"parameters": {
|
||||||
|
"Right": 0
|
||||||
|
},
|
||||||
|
"nullable": [
|
||||||
|
false,
|
||||||
|
false,
|
||||||
|
true
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"hash": "5630a591626bd416be0d1ab12fa993055b521e81382897d247ceee1b41f0bf42"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,20 @@
|
||||||
|
{
|
||||||
|
"db_name": "SQLite",
|
||||||
|
"query": "\nWITH latest_version AS (\n SELECT MAX(version_number) AS max_version\n FROM kv2_secret_version\n WHERE engine_path = $1 AND secret_path = $2 -- engine_path AND secret_path\n)\nINSERT INTO kv2_secret_version (engine_path, secret_path, nonce, encrypted_data, created_time, version_number)\nVALUES (\n $1, -- engine_path\n $2, -- secret_path\n $3, -- nonce\n $4, -- encrypted_data\n $5, -- created_time\n CASE -- Use provided version if given\n WHEN $6 IS NOT NULL THEN $6 -- version_number (optional)\n ELSE COALESCE((SELECT max_version FROM latest_version) + 1, 1) -- otherwise 1\n END -- version_number logic\n)\nRETURNING version_number;\n",
|
||||||
|
"describe": {
|
||||||
|
"columns": [
|
||||||
|
{
|
||||||
|
"name": "version_number",
|
||||||
|
"ordinal": 0,
|
||||||
|
"type_info": "Integer"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"parameters": {
|
||||||
|
"Right": 6
|
||||||
|
},
|
||||||
|
"nullable": [
|
||||||
|
false
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"hash": "8f7bfd1840d14efec44c7b59ab10461ff122ead43076ad841883a9dd189a4f37"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,20 @@
|
||||||
|
{
|
||||||
|
"db_name": "SQLite",
|
||||||
|
"query": "SELECT engine_type FROM secret_engines WHERE mount_point = $1",
|
||||||
|
"describe": {
|
||||||
|
"columns": [
|
||||||
|
{
|
||||||
|
"name": "engine_type",
|
||||||
|
"ordinal": 0,
|
||||||
|
"type_info": "Text"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"parameters": {
|
||||||
|
"Right": 1
|
||||||
|
},
|
||||||
|
"nullable": [
|
||||||
|
false
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"hash": "9265f0195bbacd15061927c2a6034e3725a25068fd3faa08cc1d02e7c926f1c2"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,12 @@
|
||||||
|
{
|
||||||
|
"db_name": "SQLite",
|
||||||
|
"query": "\n INSERT INTO root_key (encrypted_key, type, version, nonce)\n VALUES ($1, $2, 1, $3)\n ",
|
||||||
|
"describe": {
|
||||||
|
"columns": [],
|
||||||
|
"parameters": {
|
||||||
|
"Right": 3
|
||||||
|
},
|
||||||
|
"nullable": []
|
||||||
|
},
|
||||||
|
"hash": "aa131c57e0e255bfe07488095bdf25ab39e9dee182d0aecf988c9d3c2d04e66d"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,12 @@
|
||||||
|
{
|
||||||
|
"db_name": "SQLite",
|
||||||
|
"query": "\n INSERT INTO kv2_metadata (engine_path, secret_path, cas_required, created_time, max_versions, updated_time)\n VALUES ($1, $2, 0, $3, 100, $3)\n ON CONFLICT(engine_path, secret_path) DO NOTHING;\n ",
|
||||||
|
"describe": {
|
||||||
|
"columns": [],
|
||||||
|
"parameters": {
|
||||||
|
"Right": 3
|
||||||
|
},
|
||||||
|
"nullable": []
|
||||||
|
},
|
||||||
|
"hash": "af57fe92ead35790b02f38f34e1614cd1accb2da61f1d9a07eeefb0fc31ec318"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,50 @@
|
||||||
|
{
|
||||||
|
"db_name": "SQLite",
|
||||||
|
"query": "SELECT nonce, encrypted_data, created_time, deletion_time, version_number, secret_path\n FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n ORDER BY version_number DESC LIMIT 1",
|
||||||
|
"describe": {
|
||||||
|
"columns": [
|
||||||
|
{
|
||||||
|
"name": "nonce",
|
||||||
|
"ordinal": 0,
|
||||||
|
"type_info": "Blob"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "encrypted_data",
|
||||||
|
"ordinal": 1,
|
||||||
|
"type_info": "Blob"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "created_time",
|
||||||
|
"ordinal": 2,
|
||||||
|
"type_info": "Datetime"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "deletion_time",
|
||||||
|
"ordinal": 3,
|
||||||
|
"type_info": "Datetime"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "version_number",
|
||||||
|
"ordinal": 4,
|
||||||
|
"type_info": "Integer"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "secret_path",
|
||||||
|
"ordinal": 5,
|
||||||
|
"type_info": "Text"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"parameters": {
|
||||||
|
"Right": 2
|
||||||
|
},
|
||||||
|
"nullable": [
|
||||||
|
false,
|
||||||
|
false,
|
||||||
|
false,
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
false
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"hash": "b78c62fe22c4e93c54ecbc0c0cdfa31387baf14bea1ac8d27170e8b6cb456114"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,50 @@
|
||||||
|
{
|
||||||
|
"db_name": "SQLite",
|
||||||
|
"query": "SELECT nonce, encrypted_data, created_time, deletion_time, version_number, secret_path\n FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL\n AND version_number = $3",
|
||||||
|
"describe": {
|
||||||
|
"columns": [
|
||||||
|
{
|
||||||
|
"name": "nonce",
|
||||||
|
"ordinal": 0,
|
||||||
|
"type_info": "Blob"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "encrypted_data",
|
||||||
|
"ordinal": 1,
|
||||||
|
"type_info": "Blob"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "created_time",
|
||||||
|
"ordinal": 2,
|
||||||
|
"type_info": "Datetime"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "deletion_time",
|
||||||
|
"ordinal": 3,
|
||||||
|
"type_info": "Datetime"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "version_number",
|
||||||
|
"ordinal": 4,
|
||||||
|
"type_info": "Integer"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "secret_path",
|
||||||
|
"ordinal": 5,
|
||||||
|
"type_info": "Text"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"parameters": {
|
||||||
|
"Right": 3
|
||||||
|
},
|
||||||
|
"nullable": [
|
||||||
|
false,
|
||||||
|
false,
|
||||||
|
false,
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
false
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"hash": "fa8c74205ae4d497983d394ee04181c08d20cdb4a93bfce3c06a114133cd6619"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,12 @@
|
||||||
|
{
|
||||||
|
"db_name": "SQLite",
|
||||||
|
"query": "\n INSERT INTO service_token (id, key) VALUES ($1, $2);\n INSERT INTO service_token_role_membership (token_id, role_name) VALUES ($3, 'root');\n ",
|
||||||
|
"describe": {
|
||||||
|
"columns": [],
|
||||||
|
"parameters": {
|
||||||
|
"Right": 3
|
||||||
|
},
|
||||||
|
"nullable": []
|
||||||
|
},
|
||||||
|
"hash": "fe6bf34448b9f9defc27ce30a128935d991cd06e22861086c3b1377916731e57"
|
||||||
|
}
|
||||||
1841
Cargo.lock
generated
1841
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
40
Cargo.toml
40
Cargo.toml
|
|
@ -1,32 +1,44 @@
|
||||||
[package]
|
[package]
|
||||||
name = "rvault-server"
|
name = "rvault-server"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
edition = "2021"
|
edition = "2024"
|
||||||
|
|
||||||
|
[features]
|
||||||
|
default = ["shamir"]
|
||||||
|
# default = ["insecure-dev-sealing"]
|
||||||
|
insecure-dev-sealing = []
|
||||||
|
shamir = ["vsss-rs", "p256"]
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
log = "0.4.21"
|
log = "0.4.27"
|
||||||
env_logger = "0.11.3"
|
env_logger = "0.11.7"
|
||||||
zeroize = { version = "1.7.0", features = ["derive"]}
|
zeroize = { version = "1.8.1", features = ["zeroize_derive"] }
|
||||||
chrono = { version = "0.4.38", features = ["serde"] }
|
time = { version = "0.3.41", features = ["serde", "formatting"]}
|
||||||
tokio = { version = "1.37.0", features = ["full"] }
|
tokio = { version = "1.44.1", features = ["full"] }
|
||||||
tower = { version = "0.4.13", features = [] }
|
tower = { version = "0.5.2", features = [] }
|
||||||
axum = "0.7.5"
|
axum = "0.8.3"
|
||||||
serde = "1.0.201"
|
serde = "1.0.219"
|
||||||
serde_json = "1.0.117"
|
serde_json = "1.0.140"
|
||||||
# json-patch = "2.0.0"
|
dotenvy = "0.15.7"
|
||||||
# serde_with = "3.8.1"
|
base64 = "0.22.1"
|
||||||
|
|
||||||
# utoipa = { version = "4.2.0", features = ["axum_extras"] }
|
# utoipa = { version = "4.2.0", features = ["axum_extras"] }
|
||||||
sqlx = { version = "0.7.4", features = [
|
sqlx = { version = "0.8.3", features = [
|
||||||
"sqlite",
|
"sqlite",
|
||||||
# "postgres",
|
# "postgres",
|
||||||
# "any",
|
# "any",
|
||||||
"chrono",
|
|
||||||
"macros",
|
"macros",
|
||||||
"runtime-tokio",
|
"runtime-tokio",
|
||||||
"tls-rustls",
|
"tls-rustls",
|
||||||
|
"time"
|
||||||
] }
|
] }
|
||||||
|
|
||||||
|
aes-gcm-siv = "0.11.1"
|
||||||
|
vsss-rs = { version = "5.1.0", optional = true, default-features = false, features = ["zeroize", "std"] }
|
||||||
|
p256 = { version = "0.13.2", optional = true, default-features = false, features = ["std", "ecdsa"] }
|
||||||
|
rand = "0.8.5"
|
||||||
|
uuid = { version = "1.16.0", features = ["v4"] }
|
||||||
|
|
||||||
[lints]
|
[lints]
|
||||||
workspace = true
|
workspace = true
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,9 +1,12 @@
|
||||||
FROM docker.io/library/rust:1-alpine3.19 AS builder
|
ARG alpine_version=3.22
|
||||||
|
|
||||||
|
FROM docker.io/library/rust:1-alpine${alpine_version} AS builder
|
||||||
|
|
||||||
WORKDIR /src
|
WORKDIR /src
|
||||||
RUN apk add --no-cache musl-dev
|
RUN apk add --no-cache musl-dev
|
||||||
|
|
||||||
RUN cargo install sqlx-cli --no-default-features --features sqlite
|
RUN cargo install sqlx-cli --no-default-features --features sqlite
|
||||||
|
# Required for compile-time schemata checks of migrations
|
||||||
ENV DATABASE_URL=sqlite:/tmp/rvault.db
|
ENV DATABASE_URL=sqlite:/tmp/rvault.db
|
||||||
RUN touch /tmp/rvault.db
|
RUN touch /tmp/rvault.db
|
||||||
|
|
||||||
|
|
@ -13,6 +16,7 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||||
cargo fetch --locked --target $(rustc -vV | sed -n 's|host: ||p') && \
|
cargo fetch --locked --target $(rustc -vV | sed -n 's|host: ||p') && \
|
||||||
rm src/main.rs
|
rm src/main.rs
|
||||||
|
|
||||||
|
# Required for compile-time checks of query - database-schema compatibility
|
||||||
COPY migrations migrations
|
COPY migrations migrations
|
||||||
RUN cargo sqlx migrate run
|
RUN cargo sqlx migrate run
|
||||||
|
|
||||||
|
|
@ -20,9 +24,9 @@ COPY src src
|
||||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||||
cargo build --release --locked
|
cargo build --release --locked
|
||||||
|
|
||||||
FROM docker.io/library/alpine:3.19 AS runner
|
FROM docker.io/library/alpine:${alpine_version} AS runner
|
||||||
# FROM scratch AS runner
|
# FROM scratch AS runner
|
||||||
|
|
||||||
COPY --from=builder /src/target/release/rvault-server /usr/local/bin/rvault-server
|
COPY --from=builder /src/target/release/rvault-server /usr/bin/rvault-server
|
||||||
|
|
||||||
CMD ["/usr/local/bin/rvault-server"]
|
CMD ["/usr/bin/rvault-server"]
|
||||||
|
|
|
||||||
2
Justfile
2
Justfile
|
|
@ -1,6 +1,6 @@
|
||||||
|
|
||||||
build_tests:
|
build_tests:
|
||||||
podman build -t rvault-go-tests -f ./go_client/Containerfile ./go_client
|
podman build -t rvault-go-tests -f ./go_tests/Containerfile ./go_tests
|
||||||
|
|
||||||
run_tests: build_tests
|
run_tests: build_tests
|
||||||
podman run --rm -it --net=host rvault-go-tests
|
podman run --rm -it --net=host rvault-go-tests
|
||||||
|
|
|
||||||
30
README.md
Normal file
30
README.md
Normal file
|
|
@ -0,0 +1,30 @@
|
||||||
|
|
||||||
|
# rvault
|
||||||
|
|
||||||
|
rvault is an open-source implementation of the API of Vault and OpenBao, written in Rust.
|
||||||
|
|
||||||
|
## Running
|
||||||
|
|
||||||
|
You can run an offline build with `SQLX_OFFLINE=true cargo run` or `build`, respectively.
|
||||||
|
An offline build requires an up-to-date SQLx preparation.
|
||||||
|
|
||||||
|
An OCI container image can be created using `podman build . -t rvault`.
|
||||||
|
|
||||||
|
Furthermore, rvault attempts to read a `.env` file in the current working directory.
|
||||||
|
For example, its content could be:
|
||||||
|
|
||||||
|
```txt
|
||||||
|
DATABASE_URL=sqlite:test.db
|
||||||
|
RUST_LOG=debug
|
||||||
|
```
|
||||||
|
|
||||||
|
## Development
|
||||||
|
|
||||||
|
SQLx preparation can be updated with `cargo sqlx prep`.
|
||||||
|
Hence, it is not useful for development.
|
||||||
|
With `cargo sqlx database reset` the database will be recreated,
|
||||||
|
deleting all contents and reapplying migrations.
|
||||||
|
This is helpful when changing migrations during development.
|
||||||
|
|
||||||
|
When running a normal, not-offline, build, the database must be migrated (e.g. using `cargo sqlx database reset`)
|
||||||
|
for compilation of compile-time-checked queries.
|
||||||
|
|
@ -1,16 +0,0 @@
|
||||||
module github.com/C0ffeeCode/rvault/go_client
|
|
||||||
|
|
||||||
go 1.21.9
|
|
||||||
|
|
||||||
require github.com/hashicorp/vault-client-go v0.4.3
|
|
||||||
|
|
||||||
require (
|
|
||||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
|
||||||
github.com/hashicorp/go-retryablehttp v0.7.5 // indirect
|
|
||||||
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
|
|
||||||
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
|
|
||||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
|
||||||
github.com/ryanuber/go-glob v1.0.0 // indirect
|
|
||||||
golang.org/x/sys v0.19.0 // indirect
|
|
||||||
golang.org/x/time v0.5.0 // indirect
|
|
||||||
)
|
|
||||||
|
|
@ -1,29 +0,0 @@
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
|
||||||
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
|
|
||||||
github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
|
|
||||||
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
|
||||||
github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
|
|
||||||
github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
|
|
||||||
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
|
|
||||||
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
|
|
||||||
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
|
|
||||||
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
|
|
||||||
github.com/hashicorp/vault-client-go v0.4.3 h1:zG7STGVgn/VK6rnZc0k8PGbfv2x/sJExRKHSUg3ljWc=
|
|
||||||
github.com/hashicorp/vault-client-go v0.4.3/go.mod h1:4tDw7Uhq5XOxS1fO+oMtotHL7j4sB9cp0T7U6m4FzDY=
|
|
||||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
|
||||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
|
||||||
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
|
|
||||||
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
|
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
|
||||||
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
|
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
|
||||||
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
|
|
||||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
|
||||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
|
||||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
|
|
@ -1,29 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"log/slog"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/hashicorp/vault-client-go"
|
|
||||||
)
|
|
||||||
|
|
||||||
// vault cmd args: >vault server -dev -dev-root-token-id="my-token"
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
slog.Info("run tests in tests/ with >go test")
|
|
||||||
// prepare a client with the given base address
|
|
||||||
client, err := vault.New(
|
|
||||||
vault.WithAddress("http://localhost:8200"),
|
|
||||||
vault.WithRequestTimeout(30*time.Second),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
log.Println("client prepared")
|
|
||||||
|
|
||||||
// authenticate with a root token (insecure)
|
|
||||||
if err := client.SetToken("my-token"); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,151 +0,0 @@
|
||||||
package tests
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/hashicorp/vault-client-go"
|
|
||||||
"github.com/hashicorp/vault-client-go/schema"
|
|
||||||
)
|
|
||||||
|
|
||||||
var client *vault.Client
|
|
||||||
var ctx context.Context
|
|
||||||
|
|
||||||
// Apparently used as a default if mountpath is an empty string (client library)
|
|
||||||
var mountpath = "/kv-v2"
|
|
||||||
var mountpath2 = "/some"
|
|
||||||
|
|
||||||
func TestMain(m *testing.M) {
|
|
||||||
ctx = context.Background()
|
|
||||||
var err error
|
|
||||||
// prepare a client with the given base address
|
|
||||||
client, err = vault.New(
|
|
||||||
vault.WithAddress("http://localhost:8200"),
|
|
||||||
vault.WithRequestTimeout(30*time.Second),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
log.Println("client prepared")
|
|
||||||
|
|
||||||
// authenticate with a root token (insecure)
|
|
||||||
if err := client.SetToken("my-token"); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
exitCode := m.Run() // run all tests and get code
|
|
||||||
os.Exit(exitCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// https://developer.hashicorp.com/vault/api-docs/secret/kv/kv-v2#create-update-secret
|
|
||||||
func TestWriteSecret(t *testing.T) {
|
|
||||||
// Path foo
|
|
||||||
_, err := client.Secrets.KvV2Write(ctx, "foo", schema.KvV2WriteRequest{
|
|
||||||
Data: map[string]any{
|
|
||||||
"password1": "123abc",
|
|
||||||
"password2": "horse horse horse battery staple correct",
|
|
||||||
}},
|
|
||||||
vault.WithMountPath(mountpath),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal("kv2: Failed to write secret:\n\t", err)
|
|
||||||
}
|
|
||||||
log.Println("kv2: Tried to write Secret at foo at mountpath: ", mountpath)
|
|
||||||
|
|
||||||
// Path bar
|
|
||||||
_, err = client.Secrets.KvV2Write(ctx, "bar", schema.KvV2WriteRequest{
|
|
||||||
Data: map[string]any{
|
|
||||||
"password1": "abc123",
|
|
||||||
"password2": "correct horse battery staple",
|
|
||||||
}},
|
|
||||||
vault.WithMountPath(mountpath),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal("kv2: Failed to write secret:\n\t", err)
|
|
||||||
}
|
|
||||||
log.Println("kv2: Tried to write Secret at bar at mountpath: ", mountpath)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWriteSecret2(t *testing.T) {
|
|
||||||
// Path foo
|
|
||||||
_, err := client.Secrets.KvV2Write(ctx, "foo", schema.KvV2WriteRequest{
|
|
||||||
Data: map[string]any{
|
|
||||||
"password1": "123abc",
|
|
||||||
"password2": "horse horse horse battery staple correct",
|
|
||||||
}},
|
|
||||||
vault.WithMountPath(mountpath2),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal("kv2: Failed to write secret:\n\t", err)
|
|
||||||
}
|
|
||||||
log.Println("kv2: Tried to write Secret at foo at mountpath: ", mountpath2)
|
|
||||||
|
|
||||||
// Path bar
|
|
||||||
_, err = client.Secrets.KvV2Write(ctx, "bar", schema.KvV2WriteRequest{
|
|
||||||
Data: map[string]any{
|
|
||||||
"password1": "abc123",
|
|
||||||
"password2": "correct horse battery staple",
|
|
||||||
}},
|
|
||||||
vault.WithMountPath(mountpath2),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal("kv2: Failed to write secret:\n\t", err)
|
|
||||||
}
|
|
||||||
log.Println("kv2: Tried to write Secret at foo at mountpath: ", mountpath2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadSecret(t *testing.T) {
|
|
||||||
_, err := client.Secrets.KvV2Read(ctx, "bar")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal("kv2: Failed to read secret:\n\t", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadMeta(t *testing.T) {
|
|
||||||
_, err := client.Secrets.KvV2ReadMetadata(ctx, "bar")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal("kv2: Failed to read metadata:\n\t", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWriteAndReadMeta(t *testing.T) {
|
|
||||||
meta := schema.KvV2WriteMetadataRequest{
|
|
||||||
MaxVersions: 5,
|
|
||||||
CasRequired: false,
|
|
||||||
DeleteVersionAfter: "3h25m19s",
|
|
||||||
CustomMetadata: map[string]interface{}{
|
|
||||||
"foo": "abc",
|
|
||||||
"bar": "123",
|
|
||||||
"baz": "5c07d823-3810-48f6-a147-4c06b5219e84",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
_, err := client.Secrets.KvV2WriteMetadata(ctx, "newMeta", meta)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal("kv2: Failed to write metadata:\n\t", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// read the metadata
|
|
||||||
_, err2 := client.Secrets.KvV2ReadMetadata(ctx, "newMeta")
|
|
||||||
if err2 != nil {
|
|
||||||
log.Fatal("kv2: Failed to read metadata:\n\t", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// does NOT revert destruction
|
|
||||||
func TestDestroySecret(t *testing.T) {
|
|
||||||
_, err := client.Secrets.KvV2DestroyVersions(ctx, "bar", schema.KvV2DestroyVersionsRequest{Versions: []int32{1}})
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal("kv2: Failed to destroy secret:\n\t", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// does NOT revert destruction
|
|
||||||
func TestDestroySecret2(t *testing.T) {
|
|
||||||
_, err := client.Secrets.KvV2DestroyVersions(ctx, "bar", schema.KvV2DestroyVersionsRequest{Versions: []int32{1, 2}})
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal("kv2: Failed to destroy secret:\n\t", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -8,7 +8,6 @@ RUN go mod download
|
||||||
COPY . .
|
COPY . .
|
||||||
# RUN go build -o /app
|
# RUN go build -o /app
|
||||||
RUN go build
|
RUN go build
|
||||||
# CMD export GOCACHE=off
|
|
||||||
CMD go test tests/*
|
CMD go test tests/*
|
||||||
|
|
||||||
# FROM docker.io/library/alpine:3.19
|
# FROM docker.io/library/alpine:3.19
|
||||||
32
go_tests/go.mod
Normal file
32
go_tests/go.mod
Normal file
|
|
@ -0,0 +1,32 @@
|
||||||
|
module github.com/C0ffeeCode/rvault/go_client
|
||||||
|
|
||||||
|
go 1.21.9
|
||||||
|
|
||||||
|
// require github.com/hashicorp/vault-client-go v0.4.3
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/hashicorp/vault-client-go v0.4.3
|
||||||
|
github.com/hashicorp/vault/api v1.16.0
|
||||||
|
)
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||||
|
github.com/go-jose/go-jose/v4 v4.0.1 // indirect
|
||||||
|
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||||
|
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||||
|
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||||
|
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
|
||||||
|
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
|
||||||
|
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect
|
||||||
|
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
|
||||||
|
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
|
||||||
|
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||||
|
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||||
|
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||||
|
github.com/ryanuber/go-glob v1.0.0 // indirect
|
||||||
|
golang.org/x/crypto v0.32.0 // indirect
|
||||||
|
golang.org/x/net v0.34.0 // indirect
|
||||||
|
golang.org/x/sys v0.29.0 // indirect
|
||||||
|
golang.org/x/text v0.21.0 // indirect
|
||||||
|
golang.org/x/time v0.5.0 // indirect
|
||||||
|
)
|
||||||
81
go_tests/go.sum
Normal file
81
go_tests/go.sum
Normal file
|
|
@ -0,0 +1,81 @@
|
||||||
|
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||||
|
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||||
|
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||||
|
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||||
|
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
|
||||||
|
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
|
||||||
|
github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U=
|
||||||
|
github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
|
||||||
|
github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw=
|
||||||
|
github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||||
|
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||||
|
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
|
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||||
|
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||||
|
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||||
|
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
||||||
|
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
|
||||||
|
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
|
||||||
|
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
|
||||||
|
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||||
|
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||||
|
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||||
|
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
|
||||||
|
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
|
||||||
|
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
|
||||||
|
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
|
||||||
|
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ=
|
||||||
|
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
|
||||||
|
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
|
||||||
|
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
|
||||||
|
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
|
||||||
|
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
|
||||||
|
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
|
||||||
|
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||||
|
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||||
|
github.com/hashicorp/vault-client-go v0.4.3 h1:zG7STGVgn/VK6rnZc0k8PGbfv2x/sJExRKHSUg3ljWc=
|
||||||
|
github.com/hashicorp/vault-client-go v0.4.3/go.mod h1:4tDw7Uhq5XOxS1fO+oMtotHL7j4sB9cp0T7U6m4FzDY=
|
||||||
|
github.com/hashicorp/vault/api v1.16.0 h1:nbEYGJiAPGzT9U4oWgaaB0g+Rj8E59QuHKyA5LhwQN4=
|
||||||
|
github.com/hashicorp/vault/api v1.16.0/go.mod h1:KhuUhzOD8lDSk29AtzNjgAu2kxRA9jL9NAbkFlqvkBA=
|
||||||
|
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||||
|
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||||
|
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||||
|
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||||
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
|
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||||
|
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||||
|
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||||
|
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||||
|
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||||
|
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||||
|
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||||
|
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||||
|
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
|
||||||
|
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
|
||||||
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
|
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||||
|
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||||
|
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
|
||||||
|
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
|
||||||
|
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
|
||||||
|
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
|
||||||
|
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
|
||||||
|
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
|
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||||
|
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||||
|
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||||
|
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
26
go_tests/main.go
Normal file
26
go_tests/main.go
Normal file
|
|
@ -0,0 +1,26 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log/slog"
|
||||||
|
// "github.com/openbao/openbao"
|
||||||
|
)
|
||||||
|
|
||||||
|
// vault cmd args: >vault server -dev -dev-root-token-id="my-token"
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
slog.Info("run tests in tests/ with >go test")
|
||||||
|
// // prepare a client with the given base address
|
||||||
|
// client, err := vault.New(
|
||||||
|
// vault.WithAddress("http://localhost:8200"),
|
||||||
|
// vault.WithRequestTimeout(30*time.Second),
|
||||||
|
// )
|
||||||
|
// if err != nil {
|
||||||
|
// log.Fatal(err)
|
||||||
|
// }
|
||||||
|
// log.Println("client prepared")
|
||||||
|
|
||||||
|
// // authenticate with a root token (insecure)
|
||||||
|
// if err := client.SetToken("my-token"); err != nil {
|
||||||
|
// log.Fatal(err)
|
||||||
|
// }
|
||||||
|
}
|
||||||
123
go_tests/tests/secret_test.go
Normal file
123
go_tests/tests/secret_test.go
Normal file
|
|
@ -0,0 +1,123 @@
|
||||||
|
package tests
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
// "github.com/hashicorp/vault-client-go"
|
||||||
|
// "github.com/hashicorp/vault-client-go/schema"
|
||||||
|
vault "github.com/hashicorp/vault/api"
|
||||||
|
// vault "github.com/openbao/openbao/api/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
var Client *vault.Client
|
||||||
|
var ctx context.Context
|
||||||
|
|
||||||
|
func TestMain(m *testing.M) {
|
||||||
|
ctx = context.Background()
|
||||||
|
var err error
|
||||||
|
|
||||||
|
config := vault.DefaultConfig()
|
||||||
|
config.Address = "http://localhost:8200"
|
||||||
|
config.Timeout = 30 * time.Second
|
||||||
|
|
||||||
|
// prepare a client with the given base address
|
||||||
|
Client, err = vault.NewClient(config)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("unable to initialize Vault client: %v", err)
|
||||||
|
}
|
||||||
|
log.Println("client prepared")
|
||||||
|
|
||||||
|
// authenticate with a root token (insecure)
|
||||||
|
Client.SetToken("my-token")
|
||||||
|
|
||||||
|
exitCode := m.Run() // run all tests and get code
|
||||||
|
os.Exit(exitCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Requires in-code portions
|
||||||
|
// func TestUnseal(t *testing.T) {
|
||||||
|
// abc := []string{
|
||||||
|
// "eyJpIjpbMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwxXSwidiI6WzE4OCw2NiwxMTksMTQ0LDE1OSw3MCw4NiwxMTUsMTIwLDI1MywxMjQsOTYsMTM5LDk0LDQ1LDE2NiwyMTMsMzYsMTE1LDU4LDg5LDE0OCw2MCwyOCwxNTAsMTE2LDU3LDg5LDIwMCw5NywxNDYsMjEzXX0=",
|
||||||
|
// "eyJpIjpbMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwwLDAsMCwyXSwidiI6WzE1OCwyNDQsNzEsOTUsMTIyLDEzOCwyNDEsMjEzLDQ1LDE1NiwxMTgsNCwxNzYsNiwxNTcsMTkyLDE2MSwxNjEsNDMsMTc1LDE5NSw4NywxODAsMTAwLDE1NiwxNCwxNDgsMTUsMTc4LDkwLDY3LDExOF19",
|
||||||
|
// }
|
||||||
|
// for i := range abc {
|
||||||
|
// if _, err := Client.Sys().Unseal(abc[i]); err != nil {
|
||||||
|
// t.Fatal("Error unsealing", err)
|
||||||
|
// }
|
||||||
|
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
|
||||||
|
func kv2Write(t *testing.T, mount string, path string) {
|
||||||
|
data := map[string]any{
|
||||||
|
"password1": "123abc",
|
||||||
|
"password2": "horse horse horse battery staple correct",
|
||||||
|
}
|
||||||
|
t.Logf("Attempting to write to KV2 %s path %s:\t", mount, path)
|
||||||
|
v, err := Client.KVv2(mount).Put(ctx, path, data)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("ERROR writing secret:\n\t", err)
|
||||||
|
}
|
||||||
|
t.Log("Success (unchecked)\n\t", v)
|
||||||
|
|
||||||
|
res, err := Client.KVv2(mount).Get(ctx, path)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("ERROR checking/reading secret (request failed)\n\t", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(res.Data, data) {
|
||||||
|
t.Fatal("AAAAH", res.Data)
|
||||||
|
t.Fatalf("ERROR secret received does not match what was outght to be written.\n\tWritten: %s\n\tReceived: %s\n", data, res.Data)
|
||||||
|
// t.Fatal("\tWritten: ", newVar.Data)
|
||||||
|
// t.Fatal("\tReceived:", res.Data.Data)
|
||||||
|
}
|
||||||
|
t.Logf("SUCCESS writing to KV2 %s path %s\n", mount, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func kv2Delete(t *testing.T, mount string, path string) {
|
||||||
|
err := Client.KVv2(mount).Delete(ctx, path) // currently disregarding modifier options
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal("ERROR deleting secret:\n\t", err)
|
||||||
|
}
|
||||||
|
res, err := Client.KVv2(mount).Get(ctx, path)
|
||||||
|
if res != nil || err == nil {
|
||||||
|
t.Fatal("ERROR checking/reading secret (request failed)\n\t", res, err)
|
||||||
|
}
|
||||||
|
t.Logf("SUCCESS deleting KV2 secret %s path %s\n", mount, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://developer.hashicorp.com/vault/api-docs/secret/kv/kv-v2#create-update-secret
|
||||||
|
// @Philip der Path steht in der KvV2Write Methode
|
||||||
|
func TestWriteSecret(t *testing.T) {
|
||||||
|
// Apparently used as a default if mountpath is an empty string (client library)
|
||||||
|
var mountpath = "/kv-v2"
|
||||||
|
var mountpath2 = "/some"
|
||||||
|
|
||||||
|
// Path foo
|
||||||
|
t.Logf("Writing to first KV2 engine at %s...", mountpath)
|
||||||
|
kv2Write(t, mountpath, "foo")
|
||||||
|
kv2Write(t, mountpath, "bar")
|
||||||
|
t.Logf("Writing to second KV2 engine at %s...", mountpath2)
|
||||||
|
kv2Write(t, mountpath2, "foo")
|
||||||
|
kv2Write(t, mountpath2, "bar")
|
||||||
|
t.Logf("Deleting...")
|
||||||
|
kv2Delete(t, mountpath, "foo")
|
||||||
|
}
|
||||||
|
|
||||||
|
// func TestDeleteSecret(t *testing.T) {
|
||||||
|
// _, err := client.Secrets.KvV2Delete(ctx, "foo") // currently disregarding modifier options
|
||||||
|
// if err != nil {
|
||||||
|
// log.Fatal("kv2: Failed to delete secret:\n\t", err)
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
|
||||||
|
// func TestReadSecret(t *testing.T) {
|
||||||
|
// _, err := client.Secrets.KvV2Read(ctx, "bar")
|
||||||
|
// if err != nil {
|
||||||
|
// log.Fatal("kv2: Failed to read secret:\n\t", err)
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
|
@ -1,7 +1,8 @@
|
||||||
-- Add migration script here
|
-- Add migration script here
|
||||||
|
|
||||||
CREATE TABLE metadata (
|
CREATE TABLE kv2_metadata (
|
||||||
secret_path TEXT PRIMARY KEY NOT NULL,
|
engine_path TEXT NOT NULL,
|
||||||
|
secret_path TEXT NOT NULL,
|
||||||
|
|
||||||
cas_required INTEGER NOT NULL, -- no bool datatype in sqlite
|
cas_required INTEGER NOT NULL, -- no bool datatype in sqlite
|
||||||
created_time TIMESTAMP NOT NULL,
|
created_time TIMESTAMP NOT NULL,
|
||||||
|
|
@ -10,19 +11,22 @@ CREATE TABLE metadata (
|
||||||
-- current_version INTEGER NOT NULL,
|
-- current_version INTEGER NOT NULL,
|
||||||
-- oldest_version INTEGER NOT NULL,
|
-- oldest_version INTEGER NOT NULL,
|
||||||
updated_time TIMESTAMP NOT NULL,
|
updated_time TIMESTAMP NOT NULL,
|
||||||
custom_data TEXT
|
custom_data TEXT,
|
||||||
|
|
||||||
|
PRIMARY KEY (engine_path, secret_path)
|
||||||
);
|
);
|
||||||
|
|
||||||
CREATE TABLE secret_versions (
|
CREATE TABLE kv2_secret_version (
|
||||||
secret_data TEXT NOT NULL,
|
engine_path TEXT NOT NULL,
|
||||||
|
|
||||||
created_time TIMESTAMP NOT NULL,
|
|
||||||
deletion_time TIMESTAMP,
|
|
||||||
|
|
||||||
version_number INTEGER NOT NULL DEFAULT 0,
|
|
||||||
secret_path TEXT NOT NULL,
|
secret_path TEXT NOT NULL,
|
||||||
PRIMARY KEY (secret_path, version_number),
|
|
||||||
FOREIGN KEY (secret_path) REFERENCES metadata(secret_path)
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE INDEX idx_secret_versions_secret_path ON secret_versions (secret_path);
|
version_number INTEGER NOT NULL CHECK ( version_number > 0 ),
|
||||||
|
created_time DATETIME NOT NULL,
|
||||||
|
deletion_time DATETIME,
|
||||||
|
|
||||||
|
encrypted_data BLOB NOT NULL,
|
||||||
|
nonce BLOB NOT NULL CHECK ( length(nonce) = 12 ),
|
||||||
|
|
||||||
|
PRIMARY KEY (engine_path, secret_path, version_number),
|
||||||
|
FOREIGN KEY (engine_path, secret_path) REFERENCES kv2_metadata(engine_path, secret_path)
|
||||||
|
);
|
||||||
|
|
|
||||||
|
|
@ -1,8 +0,0 @@
|
||||||
-- Add migration script here
|
|
||||||
|
|
||||||
INSERT INTO metadata VALUES ("bar", false, DateTime('now'), "30d", 4, DateTime('now'), '{"foo": "customData"}');
|
|
||||||
|
|
||||||
INSERT INTO secret_versions VALUES ("secret_data", DateTime('now'), DateTime('now'), 1, "bar");
|
|
||||||
INSERT INTO secret_versions VALUES ("more_secret_data", DateTime('now'), datetime('now', '+30 day'), 2, "bar");
|
|
||||||
|
|
||||||
INSERT INTO metadata VALUES ("foo", false, DateTime('now'), "30d", 4, DateTime('now'), '{"foo": "customData"}');
|
|
||||||
8
migrations/20250326160659_sealing.sql
Normal file
8
migrations/20250326160659_sealing.sql
Normal file
|
|
@ -0,0 +1,8 @@
|
||||||
|
-- Sealing Key
|
||||||
|
|
||||||
|
CREATE TABLE root_key (
|
||||||
|
version INTEGER PRIMARY KEY CHECK ( version = 1 ),
|
||||||
|
encrypted_key BLOB NOT NULL,
|
||||||
|
nonce BLOB,
|
||||||
|
type TEXT NOT NULL CHECK ( type IN ('dev_only', 'simple', 'shamir') )
|
||||||
|
);
|
||||||
25
migrations/20250407112735_BasicIdentity.sql
Normal file
25
migrations/20250407112735_BasicIdentity.sql
Normal file
|
|
@ -0,0 +1,25 @@
|
||||||
|
CREATE TABLE identity (
|
||||||
|
id TEXT PRIMARY KEY NOT NULL,
|
||||||
|
name TEXT NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TABLE service_token_role_membership (
|
||||||
|
role_name TEXT NOT NULL,
|
||||||
|
token_id TEXT NOT NULL
|
||||||
|
REFERENCES service_token(id)
|
||||||
|
ON DELETE CASCADE
|
||||||
|
ON UPDATE CASCADE,
|
||||||
|
PRIMARY KEY (role_name, token_id)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TABLE service_token (
|
||||||
|
id TEXT PRIMARY KEY NOT NULL,
|
||||||
|
key TEXT NOT NULL,
|
||||||
|
expiry INTEGER,
|
||||||
|
parent_id TEXT NULL REFERENCES service_token(id)
|
||||||
|
ON DELETE NO ACTION
|
||||||
|
ON UPDATE CASCADE,
|
||||||
|
identity_id TEXT NULL REFERENCES identity(id)
|
||||||
|
ON DELETE CASCADE
|
||||||
|
ON UPDATE CASCADE
|
||||||
|
);
|
||||||
11464
openapi-bao.json
Normal file
11464
openapi-bao.json
Normal file
File diff suppressed because it is too large
Load diff
21
src/auth.rs
21
src/auth.rs
|
|
@ -1,13 +1,14 @@
|
||||||
|
pub mod auth_extractor;
|
||||||
|
pub(crate) mod token;
|
||||||
|
|
||||||
|
use crate::auth::token::*;
|
||||||
|
use crate::storage::DbPool;
|
||||||
use axum::Router;
|
use axum::Router;
|
||||||
|
|
||||||
use crate::storage::DatabaseDriver;
|
/// Authentication routes
|
||||||
|
pub fn auth_router(pool: DbPool) -> Router<DbPool> {
|
||||||
// route prefix: `/auth/token/`
|
// The token auth router handles all token-related authentication routes
|
||||||
// mod token;
|
Router::new()
|
||||||
|
.nest("/token", token_auth_router(pool.clone()))
|
||||||
// use self::token::token_auth_router;
|
.with_state(pool)
|
||||||
|
|
||||||
pub fn auth_router(pool: DatabaseDriver) -> Router<DatabaseDriver> {
|
|
||||||
Router::new().with_state(pool)
|
|
||||||
// .nest("/token", token_auth_router())
|
|
||||||
}
|
}
|
||||||
|
|
|
||||||
66
src/auth/auth_extractor.rs
Normal file
66
src/auth/auth_extractor.rs
Normal file
|
|
@ -0,0 +1,66 @@
|
||||||
|
use crate::auth::token::{TokenDTO, get_roles_from_token, get_token_from_key};
|
||||||
|
use crate::storage::DbPool;
|
||||||
|
use axum::body::Body;
|
||||||
|
use axum::extract::FromRequestParts;
|
||||||
|
use axum::http::request::Parts;
|
||||||
|
use axum::http::{HeaderMap, Request, StatusCode, header};
|
||||||
|
use std::fmt::Debug;
|
||||||
|
|
||||||
|
// Currently unused but for usage in the future
|
||||||
|
#[allow(unused)]
|
||||||
|
/// AuthInfo is an extractor that retrieves authentication information from the request.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct AuthInfo {
|
||||||
|
token: TokenDTO,
|
||||||
|
roles: Vec<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromRequestParts<DbPool> for AuthInfo {
|
||||||
|
type Rejection = StatusCode;
|
||||||
|
|
||||||
|
/// Extracts authentication information from the request parts.
|
||||||
|
async fn from_request_parts(
|
||||||
|
parts: &mut Parts,
|
||||||
|
state: &DbPool,
|
||||||
|
) -> Result<Self, Self::Rejection> {
|
||||||
|
let header = &parts.headers;
|
||||||
|
|
||||||
|
inspect_with_header(state, header).await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Currently unused but for usage in the future
|
||||||
|
#[allow(unused)]
|
||||||
|
/// Extracts the headers from request and returns the result from inspect_with_header function.
|
||||||
|
pub async fn inspect_req(state: &DbPool, req: &Request<Body>) -> Result<AuthInfo, StatusCode> {
|
||||||
|
let header = req.headers();
|
||||||
|
inspect_with_header(state, header).await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Inspects the request headers and extracts authentication information.
|
||||||
|
/// Returns an `AuthInfo` struct containing the token and roles if successful.
|
||||||
|
/// If the authorization header is missing or invalid, it returns a `StatusCode::UNAUTHORIZED`.
|
||||||
|
///
|
||||||
|
/// This function is intentionally separated so it can be used from
|
||||||
|
/// within the Axum extractor as well as in other functions.
|
||||||
|
pub async fn inspect_with_header(
|
||||||
|
state: &DbPool,
|
||||||
|
header: &HeaderMap,
|
||||||
|
) -> Result<AuthInfo, StatusCode> {
|
||||||
|
let auth_header = header
|
||||||
|
.get(header::AUTHORIZATION)
|
||||||
|
.and_then(|value| value.to_str().ok());
|
||||||
|
|
||||||
|
match auth_header {
|
||||||
|
Some(auth_value) => {
|
||||||
|
let token = get_token_from_key(auth_value, state).await;
|
||||||
|
if token.is_err() {
|
||||||
|
return Err(StatusCode::UNAUTHORIZED);
|
||||||
|
}
|
||||||
|
let token = token.unwrap();
|
||||||
|
let roles = get_roles_from_token(&token, state).await;
|
||||||
|
Ok(AuthInfo { token, roles })
|
||||||
|
}
|
||||||
|
None => Err(StatusCode::UNAUTHORIZED),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -1,45 +1,286 @@
|
||||||
use axum::Router;
|
// There are some placeholder functions, that will have to be implemented before the first release.
|
||||||
|
// They are marked with `todo!()` to indicate that they need to be implemented.
|
||||||
|
// We want to keep these functions in the codebase.
|
||||||
|
// That is why we choose to suppress unused warnings for now.
|
||||||
|
// TODO
|
||||||
|
#![allow(unused)]
|
||||||
|
|
||||||
pub fn token_auth_router() -> Router {
|
use crate::storage::DbPool;
|
||||||
Router::new()
|
use axum::extract::State;
|
||||||
|
use axum::http::StatusCode;
|
||||||
|
use axum::response::{IntoResponse, Response};
|
||||||
|
use axum::routing::post;
|
||||||
|
use axum::{Json, Router};
|
||||||
|
use log::error;
|
||||||
|
use rand::{Rng, distributions::Alphanumeric};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use sqlx::Error;
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
#[derive(Debug, Serialize)]
|
||||||
|
pub struct IdentityDTO {
|
||||||
|
id: String,
|
||||||
|
name: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn get_accessors() {}
|
#[derive(Debug)]
|
||||||
|
pub struct TokenDTO {
|
||||||
|
key: String,
|
||||||
|
id: String,
|
||||||
|
identity_id: Option<String>,
|
||||||
|
parent_id: Option<String>,
|
||||||
|
expiry: Option<i64>,
|
||||||
|
}
|
||||||
|
|
||||||
async fn post_create() {}
|
#[derive(Debug)]
|
||||||
|
pub struct TokenRoleMembershipDTO {
|
||||||
|
role_name: String,
|
||||||
|
token_id: String,
|
||||||
|
}
|
||||||
|
|
||||||
async fn post_create_orphan() {}
|
/// Represents a request body for the `/auth/token/lookup` endpoint.
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct RequestBodyPostLookup {
|
||||||
|
token: String,
|
||||||
|
}
|
||||||
|
|
||||||
async fn post_create_role() {}
|
/// Represents the response body for the `/auth/token/lookup` endpoint.
|
||||||
|
#[derive(Serialize)]
|
||||||
|
struct TokenLookupResponse {
|
||||||
|
id: String,
|
||||||
|
type_name: String,
|
||||||
|
roles: Vec<String>,
|
||||||
|
}
|
||||||
|
|
||||||
async fn get_lookup() {}
|
/// Represents an error response for the API.
|
||||||
|
#[derive(Serialize)]
|
||||||
|
struct ErrorResponse {
|
||||||
|
error: String,
|
||||||
|
}
|
||||||
|
|
||||||
async fn post_lookup() {}
|
/// Generates a random string of the specified length using alphanumeric characters.
|
||||||
|
// TODO: Make string generation secure
|
||||||
|
fn get_random_string(len: usize) -> String {
|
||||||
|
rand::thread_rng()
|
||||||
|
.sample_iter(&Alphanumeric)
|
||||||
|
.take(len)
|
||||||
|
.map(char::from)
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
async fn get_lookup_self() {}
|
/// Creates a root token if none exists in the database.
|
||||||
|
/// Returns true if a new root token was created, false if one already exists.
|
||||||
|
pub async fn create_root_token_if_none_exist(pool: &DbPool) -> bool {
|
||||||
|
// Check if a root token already exists
|
||||||
|
let exists = sqlx::query!(
|
||||||
|
r#"SELECT service_token.* FROM service_token, service_token_role_membership
|
||||||
|
WHERE service_token.id = service_token_role_membership.token_id AND
|
||||||
|
service_token_role_membership.role_name = 'root'
|
||||||
|
LIMIT 1"#
|
||||||
|
)
|
||||||
|
.fetch_one(pool)
|
||||||
|
.await
|
||||||
|
.is_ok();
|
||||||
|
if exists {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
// If no root token exists, create one
|
||||||
|
let result = create_root_token(pool).await;
|
||||||
|
if result.is_err() {
|
||||||
|
let error = result.err().unwrap();
|
||||||
|
// Log the error and panic
|
||||||
|
error!("create_root_token failed: {error:?}");
|
||||||
|
panic!("create_root_token failed: {error:?}");
|
||||||
|
}
|
||||||
|
// If successful, print the root token. This will only happen once.
|
||||||
|
println!("\n\nYour root token is: {}", result.unwrap());
|
||||||
|
println!("It will only be displayed once!\n\n");
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
async fn post_lookup_self() {}
|
/// Creates a root token in the database.
|
||||||
|
async fn create_root_token(pool: &DbPool) -> Result<String, Error> {
|
||||||
|
let id = Uuid::new_v4().to_string();
|
||||||
|
let key = "s.".to_string() + &get_random_string(24);
|
||||||
|
// Insert the root token into the database
|
||||||
|
let result = sqlx::query!(r#"
|
||||||
|
INSERT INTO service_token (id, key) VALUES ($1, $2);
|
||||||
|
INSERT INTO service_token_role_membership (token_id, role_name) VALUES ($3, 'root');
|
||||||
|
"#, id, key, id).execute(pool).await;
|
||||||
|
// If the insert was successful, return the key
|
||||||
|
if result.is_ok() {
|
||||||
|
return Ok(key);
|
||||||
|
}
|
||||||
|
// Else, return the error
|
||||||
|
Err(result.unwrap_err())
|
||||||
|
}
|
||||||
|
|
||||||
async fn post_renew() {}
|
/// Gets the current time in seconds since unix epoch
|
||||||
|
fn get_time_as_int() -> i64 {
|
||||||
|
std::time::SystemTime::now()
|
||||||
|
.duration_since(std::time::UNIX_EPOCH)
|
||||||
|
.unwrap()
|
||||||
|
.as_secs() as i64
|
||||||
|
}
|
||||||
|
|
||||||
async fn post_renew_accessor() {}
|
/// Gets the type of token. (The first character of the key always specifies the type)
|
||||||
|
fn get_token_type(token: &TokenDTO) -> Result<String, &str> {
|
||||||
|
Ok(match token.key.clone().chars().next().unwrap_or('?') {
|
||||||
|
's' => "service",
|
||||||
|
'b' => "batch",
|
||||||
|
'r' => "recovery",
|
||||||
|
_ => {
|
||||||
|
error!("Unsupported token type");
|
||||||
|
return Err("Unsupported token type");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
.to_string())
|
||||||
|
}
|
||||||
|
|
||||||
async fn post_renew_self() {}
|
/// Retrieves a token from the database using its key.
|
||||||
|
/// If the token is found and not expired, it returns the token.
|
||||||
|
/// Else, it returns an error.
|
||||||
|
pub async fn get_token_from_key(token_key: &str, pool: &DbPool) -> Result<TokenDTO, Error> {
|
||||||
|
let time = get_time_as_int();
|
||||||
|
sqlx::query_as!(
|
||||||
|
TokenDTO,
|
||||||
|
r#"SELECT * FROM 'service_token' WHERE key = $1 AND (expiry IS NULL OR expiry > $2) LIMIT 1"#,
|
||||||
|
token_key, time).fetch_one(pool).await
|
||||||
|
}
|
||||||
|
|
||||||
async fn post_revoke() {}
|
/// Retrieves the roles associated with a given token from the database.
|
||||||
|
/// If the token does not exist, it returns an empty vector.
|
||||||
|
pub async fn get_roles_from_token(token: &TokenDTO, pool: &DbPool) -> Vec<String> {
|
||||||
|
let result = sqlx::query_as!(
|
||||||
|
TokenRoleMembershipDTO,
|
||||||
|
r#"SELECT * FROM 'service_token_role_membership' WHERE token_id = $1"#,
|
||||||
|
token.id
|
||||||
|
)
|
||||||
|
.fetch_all(pool)
|
||||||
|
.await;
|
||||||
|
result
|
||||||
|
.unwrap_or(Vec::new())
|
||||||
|
.iter()
|
||||||
|
.map(|r| r.role_name.to_string())
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
async fn post_revoke_accessor() {}
|
/// Return a router, that may be used to route traffic to the corresponding handlers
|
||||||
|
pub fn token_auth_router(pool: DbPool) -> Router<DbPool> {
|
||||||
|
Router::new()
|
||||||
|
.route("/lookup", post(post_lookup))
|
||||||
|
.with_state(pool)
|
||||||
|
}
|
||||||
|
|
||||||
async fn post_revoke_orphan() {}
|
/// Handles the `/auth/token/lookup` endpoint.
|
||||||
|
/// Retrieves the token and its associated roles from the database using the provided token key.
|
||||||
|
/// The output format does not yet match the openBao specification and is for testing only!
|
||||||
|
async fn post_lookup(
|
||||||
|
State(pool): State<DbPool>,
|
||||||
|
Json(body): Json<RequestBodyPostLookup>,
|
||||||
|
) -> Response {
|
||||||
|
let token_str = body.token;
|
||||||
|
// Validate the token string
|
||||||
|
match get_token_from_key(&token_str, &pool).await {
|
||||||
|
// If the token is found, retrieve its type and roles
|
||||||
|
Ok(token) => {
|
||||||
|
let type_name = get_token_type(&token).unwrap_or_else(|_| String::from("Unknown"));
|
||||||
|
let roles = get_roles_from_token(&token, &pool).await;
|
||||||
|
let resp = TokenLookupResponse {
|
||||||
|
id: token.id,
|
||||||
|
type_name,
|
||||||
|
roles,
|
||||||
|
};
|
||||||
|
// Return the token information as a JSON response
|
||||||
|
(StatusCode::OK, axum::Json(resp)).into_response()
|
||||||
|
}
|
||||||
|
// If the token is not found, return a 404 Not Found error
|
||||||
|
Err(e) => {
|
||||||
|
error!("Failed to retrieve token: {e:?}");
|
||||||
|
let err = ErrorResponse {
|
||||||
|
error: "Failed to retrieve token".to_string(),
|
||||||
|
};
|
||||||
|
(StatusCode::NOT_FOUND, axum::Json(err)).into_response()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
async fn post_revoke_self() {}
|
//
|
||||||
|
// The following functions are placeholders for the various token-related operations.
|
||||||
|
//
|
||||||
|
|
||||||
async fn get_roles() {}
|
async fn get_accessors() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
async fn get_role_by_name() {}
|
async fn post_create() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
async fn post_role_by_name() {}
|
async fn post_create_orphan() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
async fn delete_role_by_name() {}
|
async fn post_create_role() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
async fn post_tidy() {}
|
async fn get_lookup() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_lookup_self() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn post_lookup_self() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn post_renew() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn post_renew_accessor() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn post_renew_self() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn post_revoke() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn post_revoke_accessor() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn post_revoke_orphan() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn post_revoke_self() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_roles() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn get_role_by_name() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn post_role_by_name() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn delete_role_by_name() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn post_tidy() -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,8 +1,8 @@
|
||||||
use axum::{
|
use axum::{
|
||||||
|
Json,
|
||||||
body::Body,
|
body::Body,
|
||||||
http::StatusCode,
|
http::StatusCode,
|
||||||
response::{IntoResponse, Response},
|
response::{IntoResponse, Response},
|
||||||
Json,
|
|
||||||
};
|
};
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
|
|
||||||
|
|
@ -13,11 +13,11 @@ pub struct HttpError {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl HttpError {
|
impl HttpError {
|
||||||
pub fn new(status_code: StatusCode, errors: Vec<String>) -> Response<Body> {
|
pub fn multiple_errors(status_code: StatusCode, errors: Vec<String>) -> Response<Body> {
|
||||||
(status_code, Json(HttpError { errors })).into_response()
|
(status_code, Json(HttpError { errors })).into_response()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn simple(status_code: StatusCode, error: impl ToString) -> Response<Body> {
|
pub fn simple(status_code: StatusCode, error: impl ToString) -> Response<Body> {
|
||||||
HttpError::new(status_code, vec![error.to_string(); 1])
|
HttpError::multiple_errors(status_code, vec![error.to_string(); 1])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,33 +1,38 @@
|
||||||
pub mod kv;
|
pub mod kv;
|
||||||
|
|
||||||
use axum::{
|
use axum::{
|
||||||
|
Extension, Router,
|
||||||
body::Body,
|
body::Body,
|
||||||
extract::{Request, State},
|
extract::{Request, State},
|
||||||
http::{StatusCode, Uri},
|
http::{StatusCode, Uri},
|
||||||
response::{IntoResponse, Response},
|
response::{IntoResponse, Response},
|
||||||
Extension, Router,
|
|
||||||
};
|
};
|
||||||
use log::*;
|
use log::*;
|
||||||
use tower::Service;
|
use tower::Service;
|
||||||
|
|
||||||
use crate::storage::DatabaseDriver;
|
use crate::{common::HttpError, storage::DbPool};
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
/// State to be used to store the database pool
|
/// State to be used to store the database pool
|
||||||
/// and the routers for each engine
|
/// and the routers for each engine.
|
||||||
struct EngineMapperState {
|
struct EngineMapperState {
|
||||||
pool: DatabaseDriver,
|
pool: DbPool,
|
||||||
kv_v2: Router,
|
kv_v2: Router,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Secret engine router
|
#[derive(Clone)]
|
||||||
pub fn secrets_router(pool: DatabaseDriver) -> Router<DatabaseDriver> {
|
struct EnginePath(String);
|
||||||
|
|
||||||
|
/// Secret engine router.
|
||||||
|
/// Dynamically puts requests into routers depending on database content.
|
||||||
|
pub fn secrets_router(pool: DbPool) -> Router<DbPool> {
|
||||||
// State containing the pool and engine routers
|
// State containing the pool and engine routers
|
||||||
let state = EngineMapperState {
|
let state = EngineMapperState {
|
||||||
pool: pool.clone(),
|
pool: pool.clone(),
|
||||||
kv_v2: kv::kv_router(pool.clone()),
|
kv_v2: kv::kv_router(pool.clone()),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Problem solved via fallback route
|
||||||
Router::new().fallback(engine_handler).with_state(state)
|
Router::new().fallback(engine_handler).with_state(state)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -38,7 +43,7 @@ async fn engine_handler(
|
||||||
req: Request,
|
req: Request,
|
||||||
) -> Response<Body> {
|
) -> Response<Body> {
|
||||||
if let Some((mount_path, engine_type)) = map_mount_points(req.uri(), &engines.pool).await {
|
if let Some((mount_path, engine_type)) = map_mount_points(req.uri(), &engines.pool).await {
|
||||||
info!("Found mount point {} of type {}", mount_path, engine_type);
|
info!("Found mount point {mount_path} of type {engine_type}");
|
||||||
// Match the engine type to the appropriate router
|
// Match the engine type to the appropriate router
|
||||||
match engine_type.as_str() {
|
match engine_type.as_str() {
|
||||||
"kv_v2" => call_router(engines.kv_v2, mount_path, req).await,
|
"kv_v2" => call_router(engines.kv_v2, mount_path, req).await,
|
||||||
|
|
@ -47,7 +52,7 @@ async fn engine_handler(
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Otherwise, the mount path could not be found
|
// Otherwise, the mount path could not be found
|
||||||
(StatusCode::NOT_FOUND, "Mount path not found").into_response()
|
HttpError::simple(StatusCode::NOT_FOUND, "Secret engine mount path not found")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -55,6 +60,7 @@ async fn engine_handler(
|
||||||
async fn call_router(engine: Router, mount_path: String, mut req: Request) -> Response {
|
async fn call_router(engine: Router, mount_path: String, mut req: Request) -> Response {
|
||||||
let rui = req.uri().path().replace(&mount_path, "").parse().unwrap();
|
let rui = req.uri().path().replace(&mount_path, "").parse().unwrap();
|
||||||
*req.uri_mut() = rui;
|
*req.uri_mut() = rui;
|
||||||
|
let mount_path = EnginePath(mount_path);
|
||||||
|
|
||||||
engine
|
engine
|
||||||
.layer(Extension(mount_path))
|
.layer(Extension(mount_path))
|
||||||
|
|
@ -66,17 +72,17 @@ async fn call_router(engine: Router, mount_path: String, mut req: Request) -> Re
|
||||||
/// HTTP error response for unknown engine types
|
/// HTTP error response for unknown engine types
|
||||||
/// Occurs when the mount path is found in the database
|
/// Occurs when the mount path is found in the database
|
||||||
/// but the registered is unknown
|
/// but the registered is unknown
|
||||||
fn unknown_engine(engine_type: String) -> (StatusCode, String) {
|
fn unknown_engine(engine_type: String) -> impl IntoResponse {
|
||||||
error!("Engine type {} not implemented", engine_type);
|
error!("Engine type {engine_type} not implemented");
|
||||||
(
|
HttpError::simple(
|
||||||
StatusCode::INTERNAL_SERVER_ERROR,
|
StatusCode::INTERNAL_SERVER_ERROR,
|
||||||
format!("Engine type {} not implemented", engine_type),
|
format!("Engine type {engine_type} not implemented"),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the mount path and engine type for the request,
|
/// Returns the mount path and engine type for the request,
|
||||||
/// if the mount path is registed at the database
|
/// if the mount path is registed at the database
|
||||||
async fn map_mount_points(req: &Uri, pool: &DatabaseDriver) -> Option<(String, String)> {
|
async fn map_mount_points(req: &Uri, pool: &DbPool) -> Option<(String, String)> {
|
||||||
let mut mount_path_fragments: Vec<&str> = req.path().split('/').collect();
|
let mut mount_path_fragments: Vec<&str> = req.path().split('/').collect();
|
||||||
|
|
||||||
// Find longest matching existing mount path for the request
|
// Find longest matching existing mount path for the request
|
||||||
|
|
|
||||||
|
|
@ -1,43 +1,31 @@
|
||||||
// pub mod logic;
|
mod data;
|
||||||
pub mod db_structs;
|
mod meta;
|
||||||
pub mod http_structs;
|
mod structs;
|
||||||
|
|
||||||
// #[cfg(test)]
|
// #[cfg(test)]
|
||||||
// mod tests;
|
// mod tests;
|
||||||
|
|
||||||
use crate::{engines::kv::http_structs::*, storage::DatabaseDriver};
|
use crate::storage::DbPool;
|
||||||
use axum::{
|
use axum::{Router, routing::*};
|
||||||
extract::{self, Path, State},
|
|
||||||
http::StatusCode,
|
|
||||||
response::IntoResponse,
|
|
||||||
routing::*,
|
|
||||||
Json, Router,
|
|
||||||
};
|
|
||||||
|
|
||||||
use chrono::{DateTime, Utc};
|
pub fn kv_router(pool: DbPool) -> Router {
|
||||||
use db_structs::*;
|
|
||||||
use log::{error, info};
|
|
||||||
use serde_json;
|
|
||||||
|
|
||||||
use sqlx::{Row, Sqlite};
|
|
||||||
use std::{collections::HashMap, convert::Infallible};
|
|
||||||
|
|
||||||
pub fn kv_router(pool: DatabaseDriver) -> Router {
|
|
||||||
Router::new()
|
Router::new()
|
||||||
.route("/config", get(get_config))
|
.route("/config", get(get_config))
|
||||||
.route("/config", post(post_config))
|
.route("/config", post(post_config))
|
||||||
.route("/data/*path", get(get_data))
|
.route("/data/{*path}", get(data::get_data))
|
||||||
// .route("/:mount_path/data/*path/", get(get_data))
|
// .route("/:mount_path/data/*path/", get(get_data))
|
||||||
.route("/data/*path", post(post_data))
|
.route("/data/{*path}", post(data::post_data))
|
||||||
.route("/data/*path", delete(delete_data))
|
// Why does HC V SDK expect PUT instead of POST - neither in the docs nor spec
|
||||||
.route("/delete/*path", post(delete_path))
|
.route("/data/{*path}", put(data::post_data))
|
||||||
.route("/destroy/*path", post(destroy_path))
|
.route("/data/{*path}", delete(data::delete_data))
|
||||||
.route("/metadata/*path", get(get_meta))
|
.route("/delete/{*path}", post(meta::delete_path))
|
||||||
|
.route("/destroy/{*path}", post(meta::destroy_path))
|
||||||
|
.route("/metadata/{*path}", get(meta::get_meta))
|
||||||
// .route("/:mount_path/metadata/*path/", get(get_meta))
|
// .route("/:mount_path/metadata/*path/", get(get_meta))
|
||||||
.route("/metadata/*path", post(post_meta))
|
.route("/metadata/{*path}", post(meta::post_meta))
|
||||||
.route("/metadata/*path", delete(delete_meta))
|
.route("/metadata/{*path}", delete(meta::delete_meta))
|
||||||
.route("/subkeys/*path", get(get_subkeys))
|
.route("/subkeys/{*path}", get(get_subkeys))
|
||||||
.route("/undelete/*path", post(post_undelete))
|
.route("/undelete/{*path}", post(post_undelete))
|
||||||
.with_state(pool)
|
.with_state(pool)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -49,331 +37,6 @@ async fn post_config() -> &'static str {
|
||||||
todo!("not implemented")
|
todo!("not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn get_data(
|
|
||||||
State(pool): State<DatabaseDriver>,
|
|
||||||
Path(path): Path<String>,
|
|
||||||
) -> Result<impl IntoResponse, Infallible> {
|
|
||||||
match sqlx::query("SELECT * FROM secret_versions WHERE secret_path = $1")
|
|
||||||
.bind(path)
|
|
||||||
.fetch_one(&pool)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Ok(v) => {
|
|
||||||
let version: i64 = v.get("version_number");
|
|
||||||
let secret_content: HashMap<String, String> = HashMap::from([
|
|
||||||
// Consider using sqlx to parse the row to a struct, do not do it manually
|
|
||||||
("secret_data".to_string(), v.get("secret_data")),
|
|
||||||
("created_time".to_string(), v.get("created_time")),
|
|
||||||
("deletion_time".to_string(), v.get("deletion_time")),
|
|
||||||
("version_number".to_string(), version.to_string()),
|
|
||||||
("secret_path".to_string(), v.get("secret_path")),
|
|
||||||
]);
|
|
||||||
let return_secret = KvSecretRes::new(KvSecretResData {
|
|
||||||
created_time: DateTime::parse_from_rfc3339(
|
|
||||||
secret_content
|
|
||||||
.get("created_time")
|
|
||||||
.unwrap_or(&"".to_string()),
|
|
||||||
)
|
|
||||||
.unwrap_or_default()
|
|
||||||
.to_utc(), // TODO
|
|
||||||
custom_metadata: None,
|
|
||||||
deletion_time: None,
|
|
||||||
destroyed: false,
|
|
||||||
version: version,
|
|
||||||
});
|
|
||||||
info!("{:?}", return_secret);
|
|
||||||
|
|
||||||
Ok((StatusCode::OK, Json(return_secret)).into_response())
|
|
||||||
}
|
|
||||||
Err(e) => match e {
|
|
||||||
sqlx::Error::RowNotFound => {
|
|
||||||
error!("{:?}", e);
|
|
||||||
let error_struct: ErrorStruct = ErrorStruct { err: e.to_string() };
|
|
||||||
error!("{:?}", error_struct.err);
|
|
||||||
Ok(error_struct.into_response()) // API doesn't specify return value in case of error. Error struct correct? Else send empty secret back?
|
|
||||||
// let error_secret = KvSecretRes{data: None, options: None};
|
|
||||||
// Ok(Json())
|
|
||||||
}
|
|
||||||
_ => panic!("{:?}", e),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn post_data(
|
|
||||||
State(pool): State<DatabaseDriver>,
|
|
||||||
Path(path): Path<String>,
|
|
||||||
extract::Json(payload): extract::Json<KvSecretReq>,
|
|
||||||
) -> Result<impl IntoResponse, Infallible> {
|
|
||||||
// Insert Metadata first -> Else: Error because of foreign key constraint
|
|
||||||
// In a later implementation, a Metadata with default values from the config will be created
|
|
||||||
|
|
||||||
log::debug!(
|
|
||||||
"Secret: {}, Content: {:?}, Version: {:?}, path: {}",
|
|
||||||
path,
|
|
||||||
payload.data,
|
|
||||||
payload.options,
|
|
||||||
path
|
|
||||||
);
|
|
||||||
|
|
||||||
let mut highest_num = 0;
|
|
||||||
match sqlx::query("SELECT version_number FROM secret_versions WHERE secret_path = $1")
|
|
||||||
.bind(&path)
|
|
||||||
.fetch_all(&pool)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Ok(v) => {
|
|
||||||
for curr_ver in v {
|
|
||||||
let curr_num = curr_ver.get("version_number");
|
|
||||||
if highest_num < curr_num {
|
|
||||||
// should be the max of the available version numbers
|
|
||||||
highest_num = curr_num;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
log::error!("Error: {}", e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let version = highest_num + 1;
|
|
||||||
log::debug!("{:?}", version);
|
|
||||||
let data = serde_json::to_string(&payload.data).unwrap();
|
|
||||||
log::debug!("Received data: {:?}", data);
|
|
||||||
let created_time = Utc::now();
|
|
||||||
let created_time_string = created_time.to_string();
|
|
||||||
let deletion_time = "12-12-2024 12:00:00"; // TODO
|
|
||||||
|
|
||||||
match sqlx::query("INSERT INTO secret_versions VALUES ($1, $2, $3, $4, $5)")
|
|
||||||
.bind(&data)
|
|
||||||
.bind(created_time_string)
|
|
||||||
.bind(deletion_time)
|
|
||||||
.bind(&version)
|
|
||||||
.bind(&path)
|
|
||||||
.execute(&pool)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Ok(v) => {
|
|
||||||
info!("{:?}", v);
|
|
||||||
|
|
||||||
let return_struct = KvSecretResData {
|
|
||||||
created_time: created_time,
|
|
||||||
custom_metadata: None,
|
|
||||||
deletion_time: None,
|
|
||||||
destroyed: false,
|
|
||||||
version: version,
|
|
||||||
};
|
|
||||||
return Ok((StatusCode::OK, Json(return_struct)).into_response());
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
error!("{:?}", e);
|
|
||||||
return Ok((StatusCode::INTERNAL_SERVER_ERROR, Json(e.to_string())).into_response());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// TODO: soft delete the secret version at path. can be undone with undelete_secret
|
|
||||||
// https://developer.hashicorp.com/vault/api-docs/secret/kv/kv-v2#delete-latest-version-of-secret
|
|
||||||
// https://developer.hashicorp.com/vault/api-docs/secret/kv/kv-v2#delete-secret-versions
|
|
||||||
async fn delete_data() -> &'static str {
|
|
||||||
todo!("not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn delete_path() -> &'static str {
|
|
||||||
todo!("not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn destroy_path(
|
|
||||||
State(pool): State<DatabaseDriver>,
|
|
||||||
Path(kv_path): Path<String>,
|
|
||||||
Json(body): Json<KvSecretDestrReq>,
|
|
||||||
) -> Result<impl IntoResponse, Infallible> {
|
|
||||||
let versions = body.versions;
|
|
||||||
|
|
||||||
for ver in versions {
|
|
||||||
let res_wrapper = sqlx::query::<Sqlite>(
|
|
||||||
"DELETE FROM secret_versions where secret_path = $1 AND version_number = $2",
|
|
||||||
)
|
|
||||||
.bind(&kv_path)
|
|
||||||
.bind(ver)
|
|
||||||
.execute(&pool)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
match res_wrapper {
|
|
||||||
Ok(result) => {
|
|
||||||
if result.rows_affected() == 0 {
|
|
||||||
log::debug!(
|
|
||||||
"No rows were deleted for version {} at path {}",
|
|
||||||
ver,
|
|
||||||
kv_path
|
|
||||||
);
|
|
||||||
} else {
|
|
||||||
log::debug!("Deleted version {} at path {}", ver, kv_path);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
log::error!(
|
|
||||||
"Failed to delete version {} at path {}: {}",
|
|
||||||
ver,
|
|
||||||
kv_path,
|
|
||||||
e
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(StatusCode::NO_CONTENT)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_meta(
|
|
||||||
State(pool): State<DatabaseDriver>,
|
|
||||||
Path(kv_path): Path<String>,
|
|
||||||
) -> Result<impl IntoResponse, Infallible> {
|
|
||||||
log::debug!("Path: {}", kv_path);
|
|
||||||
|
|
||||||
let mut metadata_res: KvMetaRes = KvMetaRes::default();
|
|
||||||
|
|
||||||
let dbmeta = sqlx::query_as::<_, DbSecretMeta>("SELECT * FROM metadata where secret_path = $1")
|
|
||||||
.bind(&kv_path)
|
|
||||||
.fetch_optional(&pool)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
match dbmeta {
|
|
||||||
Ok(Some(dbmeta)) => {
|
|
||||||
metadata_res.data = KvMetaResData {
|
|
||||||
created_time: dbmeta.created_time,
|
|
||||||
|
|
||||||
// map the custom_data to a Hashmap
|
|
||||||
custom_metadata: dbmeta.custom_data.map(|data| {
|
|
||||||
serde_json::from_str::<HashMap<String, String>>(&data)
|
|
||||||
.unwrap_or_else(|_| HashMap::new())
|
|
||||||
}),
|
|
||||||
|
|
||||||
cas_required: dbmeta.cas_required,
|
|
||||||
max_versions: dbmeta.max_versions,
|
|
||||||
updated_time: dbmeta.updated_time,
|
|
||||||
delete_version_after: dbmeta.delete_version_after,
|
|
||||||
current_version: 0,
|
|
||||||
oldest_version: 0,
|
|
||||||
versions: HashMap::new(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let version_data = sqlx::query_as::<_, DbSecretVersionMeta>("SELECT version_number, created_time, deletion_time FROM secret_versions WHERE secret_path = $1")
|
|
||||||
.bind(&kv_path)
|
|
||||||
.fetch_all(&pool)
|
|
||||||
.await;
|
|
||||||
log::debug!("found version_data: {:?}", version_data);
|
|
||||||
|
|
||||||
if let Ok(version_data) = version_data {
|
|
||||||
// 1. iterate through all version data
|
|
||||||
// 2. put all version numbers as keys in the hashmap. the rest of the values values should be the value
|
|
||||||
let mut parsed_versions: HashMap<i64, KvMetaResVersionData> = HashMap::new();
|
|
||||||
let now = Utc::now();
|
|
||||||
|
|
||||||
for curr_ver in version_data {
|
|
||||||
let curr_num = curr_ver.version_number;
|
|
||||||
let data = KvMetaResVersionData {
|
|
||||||
created_time: curr_ver.created_time,
|
|
||||||
deletion_time: curr_ver.deletion_time,
|
|
||||||
destroyed: if curr_ver.deletion_time < now {
|
|
||||||
true
|
|
||||||
} else {
|
|
||||||
false
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
if metadata_res.data.current_version < curr_num {
|
|
||||||
// the max of the available version numbers
|
|
||||||
metadata_res.data.current_version = curr_num;
|
|
||||||
}
|
|
||||||
if metadata_res.data.oldest_version > curr_num {
|
|
||||||
// the min of the available version numbers
|
|
||||||
metadata_res.data.oldest_version = curr_num;
|
|
||||||
}
|
|
||||||
|
|
||||||
parsed_versions.insert(curr_num, data);
|
|
||||||
}
|
|
||||||
|
|
||||||
metadata_res.data.versions = parsed_versions;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(None) => {
|
|
||||||
return Ok((StatusCode::BAD_REQUEST, Json("No metadata found")).into_response());
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
log::error!("Database error: {}", e);
|
|
||||||
return Ok((
|
|
||||||
StatusCode::INTERNAL_SERVER_ERROR,
|
|
||||||
Json("Internal server error"),
|
|
||||||
)
|
|
||||||
.into_response());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let json_string = serde_json::to_string(&metadata_res).unwrap();
|
|
||||||
log::debug!("Returning response: {}", json_string);
|
|
||||||
|
|
||||||
Ok((StatusCode::OK, Json(metadata_res)).into_response())
|
|
||||||
}
|
|
||||||
|
|
||||||
// currently only writes the metadata - Not case if already exists
|
|
||||||
async fn post_meta(
|
|
||||||
State(pool): State<DatabaseDriver>,
|
|
||||||
Path(kv_path): Path<String>,
|
|
||||||
Json(body): Json<KvMetaReq>,
|
|
||||||
// extract::Json(body): extract::Json<http_structs::KvMetaReq>,
|
|
||||||
) -> Result<impl IntoResponse, Infallible> {
|
|
||||||
let now = Utc::now();
|
|
||||||
let custom_metadata: String = match serde_json::to_string(&body.custom_metadata) {
|
|
||||||
Ok(data) => data,
|
|
||||||
Err(_) => {
|
|
||||||
log::error!(
|
|
||||||
"could not serialize custom_metadata: {:?}\ndropping data",
|
|
||||||
body.custom_metadata
|
|
||||||
);
|
|
||||||
String::new()
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let new_metadata = DbSecretMeta {
|
|
||||||
cas_required: body.cas_required.unwrap_or(false),
|
|
||||||
secret_path: kv_path,
|
|
||||||
created_time: now,
|
|
||||||
delete_version_after: Some("30d".to_string()),
|
|
||||||
max_versions: body.max_versions,
|
|
||||||
updated_time: now,
|
|
||||||
|
|
||||||
custom_data: Some(custom_metadata),
|
|
||||||
};
|
|
||||||
|
|
||||||
match sqlx::query("INSERT INTO metadata (cas_required, secret_path, created_time, delete_version_after, max_versions, updated_time, custom_data) VALUES ($1, $2, $3, $4, $5, $6, $7)")
|
|
||||||
.bind(new_metadata.cas_required)
|
|
||||||
.bind(new_metadata.secret_path)
|
|
||||||
.bind(new_metadata.created_time)
|
|
||||||
.bind(new_metadata.delete_version_after.unwrap())
|
|
||||||
.bind(new_metadata.max_versions)
|
|
||||||
.bind(new_metadata.updated_time)
|
|
||||||
.bind(new_metadata.custom_data.unwrap())
|
|
||||||
.execute(&pool)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Ok(result) => {
|
|
||||||
info!("{:?}", result);
|
|
||||||
if result.rows_affected() == 0 {
|
|
||||||
log::error!("Failed to insert metadata");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
error!("{:?}", e);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(StatusCode::NO_CONTENT)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn delete_meta() -> &'static str {
|
|
||||||
todo!("not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_subkeys() -> &'static str {
|
async fn get_subkeys() -> &'static str {
|
||||||
todo!("not implemented")
|
todo!("not implemented")
|
||||||
}
|
}
|
||||||
|
|
|
||||||
253
src/engines/kv/data.rs
Normal file
253
src/engines/kv/data.rs
Normal file
|
|
@ -0,0 +1,253 @@
|
||||||
|
// There are some placeholder functions, that will have to be implemented before the first release.
|
||||||
|
// They are marked with `todo!()` to indicate that they need to be implemented.
|
||||||
|
// We want to keep these functions in the codebase.
|
||||||
|
// That is why we choose to suppress unused warnings for now.
|
||||||
|
// TODO
|
||||||
|
#![allow(unused)]
|
||||||
|
|
||||||
|
use super::structs::KvV2WriteRequest;
|
||||||
|
use crate::{
|
||||||
|
DbPool,
|
||||||
|
common::HttpError,
|
||||||
|
engines::{
|
||||||
|
EnginePath,
|
||||||
|
kv::structs::{KvSecretData, KvSecretRes, KvV2WriteResponse, Wrapper},
|
||||||
|
},
|
||||||
|
storage::sealing::Secret,
|
||||||
|
};
|
||||||
|
use axum::{
|
||||||
|
Extension, Json,
|
||||||
|
extract::{Path, Query, State},
|
||||||
|
http::StatusCode,
|
||||||
|
response::{IntoResponse, NoContent, Response},
|
||||||
|
};
|
||||||
|
use log::{debug, error, info, warn};
|
||||||
|
use serde::Deserialize;
|
||||||
|
use time::{OffsetDateTime, UtcDateTime};
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
pub struct GetDataQuery {
|
||||||
|
#[serde(default)]
|
||||||
|
/// Version of secret requested to be read.
|
||||||
|
/// Default `0`, to get the most recent version.
|
||||||
|
pub version: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Unluckily needed as `sqlx::query_as!()` does not support FromRow derivations
|
||||||
|
struct SecretDataInternal {
|
||||||
|
pub created_time: OffsetDateTime,
|
||||||
|
pub deletion_time: Option<OffsetDateTime>,
|
||||||
|
pub version_number: i64,
|
||||||
|
pub secret_path: String,
|
||||||
|
|
||||||
|
pub nonce: Vec<u8>,
|
||||||
|
pub encrypted_data: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SecretDataInternal {
|
||||||
|
pub async fn into_external(self) -> KvSecretData {
|
||||||
|
let secret = Secret::new(self.encrypted_data, self.nonce).decrypt().await;
|
||||||
|
KvSecretData {
|
||||||
|
created_time: self.created_time,
|
||||||
|
deletion_time: self.deletion_time,
|
||||||
|
version_number: self.version_number,
|
||||||
|
secret_path: self.secret_path,
|
||||||
|
secret_data: secret.unwrap(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn get_data(
|
||||||
|
State(pool): State<DbPool>,
|
||||||
|
Query(params): Query<GetDataQuery>,
|
||||||
|
Path(path): Path<String>,
|
||||||
|
Extension(EnginePath(engine_path)): Extension<EnginePath>,
|
||||||
|
) -> Result<Response, ()> {
|
||||||
|
debug!("Get request: Engine: {engine_path}, path: {path}",);
|
||||||
|
|
||||||
|
let res = if params.version != 0 {
|
||||||
|
// With specific version
|
||||||
|
sqlx::query_as!(
|
||||||
|
SecretDataInternal,
|
||||||
|
r#"SELECT nonce, encrypted_data, created_time, deletion_time, version_number, secret_path
|
||||||
|
FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL
|
||||||
|
AND version_number = $3"#,
|
||||||
|
engine_path, path, params.version).fetch_one(&pool).await
|
||||||
|
} else {
|
||||||
|
// Without specific version
|
||||||
|
sqlx::query_as!(
|
||||||
|
SecretDataInternal,
|
||||||
|
r#"SELECT nonce, encrypted_data, created_time, deletion_time, version_number, secret_path
|
||||||
|
FROM kv2_secret_version WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL
|
||||||
|
ORDER BY version_number DESC LIMIT 1"#,
|
||||||
|
engine_path, path).fetch_one(&pool).await
|
||||||
|
};
|
||||||
|
|
||||||
|
match res {
|
||||||
|
Ok(secret_content) => {
|
||||||
|
let secret_content = secret_content.into_external().await;
|
||||||
|
let inner = secret_content.secret_data;
|
||||||
|
let data = Wrapper {
|
||||||
|
data: serde_json::from_str(&inner).unwrap(),
|
||||||
|
};
|
||||||
|
let return_secret = KvSecretRes {
|
||||||
|
data,
|
||||||
|
options: None,
|
||||||
|
version: Some(secret_content.version_number),
|
||||||
|
};
|
||||||
|
let return_secret = Json(return_secret);
|
||||||
|
info!("{return_secret:?}");
|
||||||
|
|
||||||
|
Ok(return_secret.into_response())
|
||||||
|
}
|
||||||
|
Err(e) => match e {
|
||||||
|
sqlx::Error::RowNotFound => {
|
||||||
|
warn!("Secret not found (could be correct behavior) {e:?}");
|
||||||
|
Ok(HttpError::simple(
|
||||||
|
StatusCode::NOT_FOUND,
|
||||||
|
"Secret not found within kv2 engine",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
_ => panic!("Unhandled error: {e:?}"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn post_data(
|
||||||
|
State(pool): State<DbPool>,
|
||||||
|
Path(kv_path): Path<String>,
|
||||||
|
Extension(EnginePath(engine_path)): Extension<EnginePath>,
|
||||||
|
Json(secret): Json<KvV2WriteRequest>,
|
||||||
|
) -> Result<Response, ()> {
|
||||||
|
debug!(
|
||||||
|
"Engine: {}, Secret: {}, Version: {:?}, path: {}",
|
||||||
|
engine_path,
|
||||||
|
kv_path,
|
||||||
|
secret.version, //.unwrap_or(0),
|
||||||
|
kv_path
|
||||||
|
);
|
||||||
|
|
||||||
|
let created_time = time::UtcDateTime::now();
|
||||||
|
let ts = created_time.unix_timestamp();
|
||||||
|
|
||||||
|
let content = serde_json::to_string(&secret.data).unwrap();
|
||||||
|
|
||||||
|
let Secret {
|
||||||
|
nonce,
|
||||||
|
protected_data,
|
||||||
|
} = Secret::encrypt(&content).await.unwrap();
|
||||||
|
let nonce = nonce.as_slice();
|
||||||
|
|
||||||
|
let mut tx = pool.begin().await.unwrap();
|
||||||
|
|
||||||
|
let _ = sqlx::query!("
|
||||||
|
INSERT INTO kv2_metadata (engine_path, secret_path, cas_required, created_time, max_versions, updated_time)
|
||||||
|
VALUES ($1, $2, 0, $3, 100, $3)
|
||||||
|
ON CONFLICT(engine_path, secret_path) DO NOTHING;
|
||||||
|
", engine_path, kv_path, ts).execute(&mut *tx).await.unwrap();
|
||||||
|
|
||||||
|
let res_r = sqlx::query_file!(
|
||||||
|
"src/engines/kv/post_secret.sql",
|
||||||
|
engine_path,
|
||||||
|
kv_path,
|
||||||
|
nonce,
|
||||||
|
protected_data,
|
||||||
|
ts,
|
||||||
|
secret.version,
|
||||||
|
)
|
||||||
|
.fetch_one(&mut *tx)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
tx.commit().await.expect("FAILED TO WRITE TX!");
|
||||||
|
|
||||||
|
let res = KvV2WriteResponse {
|
||||||
|
created_time: created_time.into(),
|
||||||
|
custom_metadata: None,
|
||||||
|
deletion_time: None,
|
||||||
|
destroyed: false,
|
||||||
|
version: res_r.version_number,
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(Json(res).into_response())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// TODO: soft delete the secret version at path. can be undone with undelete_secret
|
||||||
|
// https://developer.hashicorp.com/vault/api-docs/secret/kv/kv-v2#delete-latest-version-of-secret
|
||||||
|
// https://developer.hashicorp.com/vault/api-docs/secret/kv/kv-v2#delete-secret-versions
|
||||||
|
pub async fn delete_data(
|
||||||
|
State(pool): State<DbPool>,
|
||||||
|
Path(path): Path<String>,
|
||||||
|
Extension(EnginePath(engine_path)): Extension<EnginePath>,
|
||||||
|
) -> Result<Response, Response> {
|
||||||
|
debug!("Secret: {path}, path: {path}");
|
||||||
|
|
||||||
|
let del_time = UtcDateTime::now().unix_timestamp();
|
||||||
|
|
||||||
|
let mut tx = pool.begin().await.unwrap();
|
||||||
|
|
||||||
|
// TODO: Find a better way
|
||||||
|
let latest_version = sqlx::query!(
|
||||||
|
r#"
|
||||||
|
SELECT version_number AS latest_version FROM kv2_secret_version
|
||||||
|
WHERE engine_path = $1 AND secret_path = $2 AND deletion_time IS NULL
|
||||||
|
ORDER BY version_number DESC LIMIT 1"#,
|
||||||
|
engine_path,
|
||||||
|
path,
|
||||||
|
)
|
||||||
|
.fetch_optional(&mut *tx)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let latest_version = match latest_version {
|
||||||
|
Some(v) => v.latest_version,
|
||||||
|
None => {
|
||||||
|
return Err(HttpError::simple(
|
||||||
|
StatusCode::NOT_FOUND,
|
||||||
|
"No secret version found which could be deleted",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let u = sqlx::query!(
|
||||||
|
r#"
|
||||||
|
UPDATE kv2_secret_version
|
||||||
|
SET deletion_time = $4
|
||||||
|
WHERE engine_path = $1 AND secret_path = $2
|
||||||
|
AND version_number = $3
|
||||||
|
"#,
|
||||||
|
engine_path,
|
||||||
|
path,
|
||||||
|
latest_version,
|
||||||
|
del_time
|
||||||
|
)
|
||||||
|
.execute(&mut *tx)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
if let Err(e) = u {
|
||||||
|
error!(
|
||||||
|
"Strange - a version to be deleted has been found but could not be found to set deletion.\n\t{e:?}"
|
||||||
|
);
|
||||||
|
// Not committed transactions will be aborted upon drop
|
||||||
|
// tx.rollback().await.unwrap();
|
||||||
|
return Err(HttpError::simple(
|
||||||
|
StatusCode::INTERNAL_SERVER_ERROR,
|
||||||
|
"A version to be deleted was found but could not be deleted",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
tx.commit().await.unwrap();
|
||||||
|
|
||||||
|
info!("Secret {path} version {latest_version} of {engine_path} engine deleted! {u:?}");
|
||||||
|
|
||||||
|
Ok(NoContent.into_response())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn patch_data(
|
||||||
|
State(pool): State<DbPool>,
|
||||||
|
Path(kv_path): Path<String>,
|
||||||
|
Extension(EnginePath(engine_path)): Extension<EnginePath>,
|
||||||
|
Json(secret): Json<KvV2WriteRequest>,
|
||||||
|
) -> &'static str {
|
||||||
|
todo!("not implemented")
|
||||||
|
}
|
||||||
|
|
@ -1,35 +0,0 @@
|
||||||
use chrono::{DateTime, Utc};
|
|
||||||
use serde::Serialize;
|
|
||||||
use sqlx::FromRow;
|
|
||||||
|
|
||||||
#[derive(FromRow, Debug)]
/// Database row with version-agnostic metadata of one KV v2 secret.
pub struct DbSecretMeta {
    // Path of the secret relative to its engine mount.
    pub secret_path: String,
    // When true, writes must pass a Check-And-Set version number.
    pub cas_required: bool,
    pub created_time: DateTime<Utc>,
    // Consider implementation of duration type for further development:
    // https://developer.hashicorp.com/vault/docs/concepts/duration-format
    /// In Hashicorp:
    /// If not set, the backend's configured delete_version_after is used.
    /// Cannot be greater than the backend's delete_version_after
    pub delete_version_after: Option<String>,

    ///In Hashicorp:
    /// The number of versions to keep per key.
    /// If not set, the backend’s configured max version is used.
    /// Once a key has more than the configured allowed versions,
    /// the oldest version will be permanently deleted.
    pub max_versions: i64,
    pub updated_time: DateTime<Utc>,
    /// User-provided key-value pairs that are used to describe arbitrary and version-agnostic information about a secret.
    // NOTE(review): stored as a raw string here — presumably JSON-encoded; confirm.
    pub custom_data: Option<String>,
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Debug, FromRow)]
/// Metadata concerning a specific secret version
/// contained by [KvMetaRes]
pub struct DbSecretVersionMeta {
    pub version_number: i64,
    pub created_time: DateTime<Utc>,
    // NOTE(review): non-optional — a live (never-deleted) version cannot be
    // represented by this row type; likely should be Option<DateTime<Utc>>.
    // TODO confirm against the schema.
    pub deletion_time: DateTime<Utc>,
}
|
|
||||||
25
src/engines/kv/delete_secret.sql
Normal file
25
src/engines/kv/delete_secret.sql
Normal file
|
|
@ -0,0 +1,25 @@
|
||||||
|
|
||||||
|
-- Development/reference query: soft-delete the latest live version of a secret
-- and report both the deleted version and the new latest live version.
-- NOTE(review): engine_path '/kv-v2' and secret_path 'foo' are hard-coded here;
-- application code binds these as parameters instead.
WITH latest AS (
    -- Highest version that has not been soft-deleted yet.
    SELECT version_number AS version
    FROM kv2_secret_version
    WHERE engine_path = '/kv-v2' AND secret_path = 'foo' AND deletion_time IS NULL
    ORDER BY version_number DESC
    LIMIT 1
),
update_deleted AS (
    -- Soft delete: only deletion_time is set; the row (and its data) is kept.
    UPDATE kv2_secret_version
    SET deletion_time = CURRENT_TIMESTAMP
    WHERE engine_path = '/kv-v2' AND secret_path = 'foo'
    AND version_number = (SELECT version FROM latest)
    RETURNING version_number AS deleted_version
),
new_latest AS (
    -- Latest version still live after the soft delete above.
    SELECT version_number AS new_latest_version
    FROM kv2_secret_version
    WHERE engine_path = '/kv-v2' AND secret_path = 'foo' AND deletion_time IS NULL
    ORDER BY version_number DESC
    LIMIT 1
)
SELECT
    (SELECT deleted_version FROM update_deleted) AS deleted_version,
    (SELECT new_latest_version FROM new_latest) AS new_latest_version;
|
||||||
|
|
@ -1,143 +0,0 @@
|
||||||
use axum::{
|
|
||||||
body::Body,
|
|
||||||
http::{Response, StatusCode},
|
|
||||||
response::IntoResponse,
|
|
||||||
};
|
|
||||||
use chrono::{DateTime, Utc};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use zeroize::Zeroize;
|
|
||||||
|
|
||||||
/// Flat key-value payload of a secret as sent/received over HTTP.
pub type KvSecretData = HashMap<String, String>;
|
|
||||||
|
|
||||||
// This file contains structures for serializing HTTP Responses (Res) and deserializing Requests (Req) for the KV engine
|
|
||||||
|
|
||||||
#[derive(Deserialize, Debug)]
|
|
||||||
// #[zeroize(drop)]
|
|
||||||
/// HTTP Request to create or update a secret
|
|
||||||
pub struct KvSecretReq {
|
|
||||||
/// Map (required)
|
|
||||||
pub data: KvSecretData,
|
|
||||||
/// Map (optional), may contain `cas` integer
|
|
||||||
// #[serde_as(as = "serde_with::EnumMap")]
|
|
||||||
pub options: Option<HashMap<String, String>>,
|
|
||||||
// Version does not exist for create/update operations
|
|
||||||
// pub version: Option<i64>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Zeroize for KvSecretReq {
|
|
||||||
fn zeroize(&mut self) {
|
|
||||||
// Zero out each field individually
|
|
||||||
self.data = HashMap::new();
|
|
||||||
self.options = None;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for KvSecretReq {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
self.zeroize();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Debug)]
/// HTTP Response to creating or updating a secret
/// Contained by [`KvSecretRes`]
pub struct KvSecretResData {
    pub created_time: DateTime<Utc>,
    // User-provided, version-agnostic key-value metadata (if any).
    pub custom_metadata: Option<HashMap<String, String>>,
    // None while the version is live; set once it has been soft-deleted.
    pub deletion_time: Option<DateTime<Utc>>,
    // Presumably mirrors Vault's "destroyed" flag (data permanently removed)
    // — TODO confirm.
    pub destroyed: bool,
    pub version: i64,
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Debug)]
|
|
||||||
/// HTTP Response to creating or updating a secret
|
|
||||||
/// Container of [`KvSecretResData`]
|
|
||||||
pub struct KvSecretRes {
|
|
||||||
pub data: KvSecretResData,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl KvSecretRes {
|
|
||||||
pub fn new(data: KvSecretResData) -> Self {
|
|
||||||
KvSecretRes { data }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize)]
|
|
||||||
pub struct ErrorStruct {
|
|
||||||
pub err: String,
|
|
||||||
}
|
|
||||||
impl ErrorStruct {
|
|
||||||
pub fn into_response(self) -> Response<Body> {
|
|
||||||
let body = self.err;
|
|
||||||
(StatusCode::NOT_FOUND, body).into_response()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize)]
/// HTTP Request to destroy secret versions
pub struct KvSecretDestrReq {
    // Version numbers whose data should be permanently destroyed.
    pub versions: Vec<i64>,
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Debug)]
/// HTTP Response to Reading a Secret metadata
/// Container of [`KvMetaResData`]
pub struct KvMetaRes {
    // Vault wraps the payload in a top-level "data" object.
    pub data: KvMetaResData,
}
|
|
||||||
|
|
||||||
impl Default for KvMetaRes {
|
|
||||||
fn default() -> Self {
|
|
||||||
let now = Utc::now();
|
|
||||||
Self {
|
|
||||||
data: KvMetaResData {
|
|
||||||
cas_required: false,
|
|
||||||
created_time: now,
|
|
||||||
delete_version_after: Some("".to_string()),
|
|
||||||
max_versions: 0,
|
|
||||||
updated_time: now,
|
|
||||||
custom_metadata: Some(HashMap::new()),
|
|
||||||
current_version: 0,
|
|
||||||
oldest_version: 0,
|
|
||||||
|
|
||||||
versions: HashMap::new(),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Debug)]
/// Metadata concerning a specific secret version
/// contained by [KvMetaRes]
pub struct KvMetaResVersionData {
    pub created_time: DateTime<Utc>,
    // NOTE(review): non-optional, so a live (never-deleted) version cannot be
    // represented — likely should be Option<DateTime<Utc>>. TODO confirm.
    pub deletion_time: DateTime<Utc>,
    pub destroyed: bool,
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Debug)]
/// contained by [KvMetaRes]
pub struct KvMetaResData {
    // When true, writes must pass a Check-And-Set version number.
    pub cas_required: bool,
    pub created_time: DateTime<Utc>,
    pub current_version: i64,
    pub delete_version_after: Option<String>,
    pub max_versions: i64,
    pub oldest_version: i64,
    pub updated_time: DateTime<Utc>,
    // User-provided, version-agnostic key-value metadata.
    pub custom_metadata: Option<HashMap<String, String>>,
    pub versions: HashMap<i64, KvMetaResVersionData>,
    // here, the key to a version is the version number
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Debug, Deserialize)]
/// HTTP Request to post metadatas
pub struct KvMetaReq {
    // When true, every write must provide a Check-And-Set version.
    pub cas_required: Option<bool>,
    // pub cas_required: bool,
    pub delete_version_after: Option<String>,
    // NOTE(review): unlike the other fields this one is mandatory in the
    // request body — confirm whether it should be Option<i64> as well.
    pub max_versions: i64,
    // pub updated_time: Option<DateTime<Utc>>,
    pub custom_metadata: Option<HashMap<String, String>>,
}
|
|
||||||
|
|
@ -1,31 +0,0 @@
|
||||||
use serde_json::Value;
|
|
||||||
|
|
||||||
use super::{db_structs::SecretMeta, http_structs::*};
|
|
||||||
|
|
||||||
// Consider leaving this here - JSON merge patch is also used in the official implementation
|
|
||||||
#[deprecated(note = "Propably not needed (remove deprecation if actually needed)")]
|
|
||||||
/// Consider:
|
|
||||||
/// Instead of patching JSON, we should apply the modified fields directly to the database
|
|
||||||
pub fn patch_metadata(
|
|
||||||
old: &mut SecretMeta,
|
|
||||||
new: &SecretMeta,
|
|
||||||
) -> Result<SecretMeta, serde_json::Error> {
|
|
||||||
let mut patch = serde_json::to_value(old)?; // ? operator is cool: returns early if error was detected
|
|
||||||
let new_json = serde_json::to_value(new)?;
|
|
||||||
json_patch::merge(&mut patch, &new_json);
|
|
||||||
serde_json::from_value(patch)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[deprecated(note = "DO NOT USE: Use Axum extractors to structs instead")]
#[allow(unreachable_code, unused_variables)]
/// See [JSON extractor documentation](https://docs.rs/axum/latest/axum/struct.Json.html#extractor-example)
// Always panics via `todo!`; the parsing code below is intentionally kept as
// dead reference code until the function is removed.
pub fn body_to_json(body: String) -> Value {
    todo!("REMOVE: Use Axum extractors to structs instead");
    match serde_json::from_str::<serde_json::Value>(body.as_str()) {
        Ok(val) => val,
        Err(e) => {
            log::debug!("Faulty result from conversion: {:?}", e);
            // Falls back to a JSON string value, not an error object.
            "Error converting body".into()
        }
    }
}
|
|
||||||
33
src/engines/kv/meta.rs
Normal file
33
src/engines/kv/meta.rs
Normal file
|
|
@ -0,0 +1,33 @@
|
||||||
|
// There are some placeholder functions, that will have to be implemented before the first release.
|
||||||
|
// They are marked with `todo!()` to indicate that they need to be implemented.
|
||||||
|
// We want to keep these functions in the codebase.
|
||||||
|
// That is why we choose to suppress unused warnings for now.
|
||||||
|
// TODO
|
||||||
|
#![allow(unused)]
|
||||||
|
|
||||||
|
use crate::storage::DbPool;
|
||||||
|
use axum::extract::{Path, State};
|
||||||
|
|
||||||
|
/// Route handler stub; panics via `todo!` until implemented.
pub async fn delete_path() -> &'static str {
    todo!("not implemented")
}
|
||||||
|
|
||||||
|
/// Route handler stub; panics via `todo!` until implemented.
pub async fn destroy_path() -> &'static str {
    todo!("not implemented")
}
|
||||||
|
|
||||||
|
/// Route handler stub; panics via `todo!` until implemented.
pub async fn get_meta() -> &'static str {
    todo!("not implemented")
}
|
||||||
|
|
||||||
|
/// Route handler stub for writing secret metadata; panics via `todo!` until
/// implemented. Extractors are already wired so the route signature is final.
pub async fn post_meta(
    State(pool): State<DbPool>,
    Path((mount_path, kv_path)): Path<(String, String)>,
    body: String,
) -> &'static str {
    todo!("not implemented")
}
|
||||||
|
|
||||||
|
/// Route handler stub; panics via `todo!` until implemented.
pub async fn delete_meta() -> &'static str {
    todo!("not implemented")
}
|
||||||
19
src/engines/kv/post_secret.sql
Normal file
19
src/engines/kv/post_secret.sql
Normal file
|
|
@ -0,0 +1,19 @@
|
||||||
|
|
||||||
|
-- Inserts a new secret version. The version number is taken from $6 when the
-- caller supplies one; otherwise it is the current maximum plus one, or 1 for
-- the very first version of a secret.
WITH latest_version AS (
    SELECT MAX(version_number) AS max_version
    FROM kv2_secret_version
    WHERE engine_path = $1 AND secret_path = $2 -- engine_path AND secret_path
)
INSERT INTO kv2_secret_version (engine_path, secret_path, nonce, encrypted_data, created_time, version_number)
VALUES (
    $1, -- engine_path
    $2, -- secret_path
    $3, -- nonce
    $4, -- encrypted_data
    $5, -- created_time
    -- COALESCE walks: explicit $6 -> max+1 (NULL when no rows yet) -> 1.
    COALESCE($6, (SELECT max_version FROM latest_version) + 1, 1)
)
RETURNING version_number;
|
||||||
123
src/engines/kv/structs.rs
Normal file
123
src/engines/kv/structs.rs
Normal file
|
|
@ -0,0 +1,123 @@
|
||||||
|
// There are some placeholder functions, that will have to be implemented before the first release.
|
||||||
|
// They are marked with `todo!()` to indicate that they need to be implemented.
|
||||||
|
// We want to keep these functions in the codebase.
|
||||||
|
// That is why we choose to suppress unused warnings for now.
|
||||||
|
#![allow(unused)]
|
||||||
|
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::{collections::HashMap, vec};
|
||||||
|
use time::{OffsetDateTime, UtcDateTime, serde::rfc3339};
|
||||||
|
|
||||||
|
// #[derive(Serialize, Deserialize, Debug)]
|
||||||
|
// pub struct KvSecretData {
|
||||||
|
// pub secret_data: String,
|
||||||
|
// #[serde(with = "rfc3339")]
|
||||||
|
// pub created_time: UtcDateTime,
|
||||||
|
|
||||||
|
// #[serde(with = "rfc3339::option")]
|
||||||
|
// pub deletion_time: Option<UtcDateTime>,
|
||||||
|
// pub version_number: i64,
|
||||||
|
// pub secret_path: String,
|
||||||
|
// }
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone)]
/// A single secret version as returned by the KV v2 engine.
pub struct KvSecretData {
    // NOTE(review): the field name suggests plaintext, while the DB stores
    // `encrypted_data` + `nonce` — presumably this holds decrypted data;
    // TODO confirm.
    pub secret_data: String,
    #[serde(with = "rfc3339")]
    pub created_time: OffsetDateTime,
    #[serde(with = "rfc3339::option")]
    pub deletion_time: Option<OffsetDateTime>,
    pub version_number: i64,
    pub secret_path: String,
}
|
||||||
|
|
||||||
|
// impl From<KvSecretDataDBO> for KvSecretData {
|
||||||
|
// fn from(value: KvSecretDataDBO) -> Self {
|
||||||
|
// Self {
|
||||||
|
// secret_data: value.secret_data,
|
||||||
|
// created_time: value.created_time.to_offset(UtcOffset::UTC),
|
||||||
|
// deletion_time: value.deletion_time.map(|v| v.to_utc()),
|
||||||
|
// version_number: value.version_number,
|
||||||
|
// secret_path: value.secret_path,
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
|
||||||
|
#[derive(serde::Serialize, Deserialize, Debug)]
/// Generic envelope mirroring Vault's top-level `{"data": ...}` JSON shape.
pub struct Wrapper<T> {
    pub data: T,
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug)]
/// HTTP response body for reading a secret version.
pub struct KvSecretRes {
    /// Map (required)
    pub data: Wrapper<serde_json::Value>,
    /// Map (optional), may contain `cas` integer
    /// Set the `cas` value to use a Check-And-Set operation
    // #[serde_as(as = "serde_with::EnumMap")]
    pub options: Option<HashMap<String, String>>,
    // Version does not exist for create/update operations
    pub version: Option<i64>,
    // TODO add all fields
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
/// Request body of the KV v2 create/update endpoint.
pub struct KvV2WriteRequest {
    // Arbitrary JSON object holding the secret's key-value pairs.
    pub data: serde_json::Value,
    // Optional write options; may carry a `cas` integer for Check-And-Set.
    pub options: Option<serde_json::Value>,
    pub version: Option<i32>,
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Debug)]
/// Response body of the KV v2 create/update endpoint (version metadata).
pub struct KvV2WriteResponse {
    #[serde(with = "rfc3339")]
    pub created_time: OffsetDateTime,
    pub custom_metadata: Option<HashMap<String, String>>,
    // None while the version is live; set once it has been soft-deleted.
    #[serde(with = "rfc3339::option")]
    pub deletion_time: Option<OffsetDateTime>,
    pub destroyed: bool,
    pub version: i64,
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug)]
/// Per-version metadata entry carried inside [SecretMeta].
pub struct VersionMeta {
    // NOTE(review): serialized without the `rfc3339` helper used on the other
    // timestamp fields in this file — confirm the intended wire format.
    pub created_time: UtcDateTime,
    pub deletion_time: Option<UtcDateTime>, // optional deletion time
    pub destroyed: bool,
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug)]
/// Version-agnostic metadata of a KV v2 secret plus its per-version entries.
pub struct SecretMeta {
    // When true, writes must pass a Check-And-Set version number.
    pub cas_required: bool,
    pub created_time: UtcDateTime,
    pub current_version: i64,
    /// In Hashicorp:
    /// If not set, the backend's configured delete_version_after is used.
    /// Cannot be greater than the backend's delete_version_after
    // TODO: implement duration type
    pub delete_version_after: String,
    // TODO https://developer.hashicorp.com/vault/docs/concepts/duration-format
    pub max_versions: i64,
    pub oldest_version: i64,
    pub updated_time: UtcDateTime,
    /// User-provided key-value pairs that are used to describe arbitrary and version-agnostic information about a secret.
    pub custom_metadata: Option<HashMap<String, String>>,
    pub versions: Vec<VersionMeta>,
}
|
||||||
|
|
||||||
|
impl Default for SecretMeta {
|
||||||
|
fn default() -> Self {
|
||||||
|
let current = UtcDateTime::now();
|
||||||
|
SecretMeta {
|
||||||
|
cas_required: false,
|
||||||
|
created_time: current,
|
||||||
|
current_version: 1,
|
||||||
|
delete_version_after: "24h00m00s".to_string(),
|
||||||
|
max_versions: 10,
|
||||||
|
oldest_version: 1,
|
||||||
|
updated_time: current,
|
||||||
|
custom_metadata: None,
|
||||||
|
versions: vec![],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -1,68 +0,0 @@
|
||||||
// This file is deprecated. Currently, all tests are in written in go.
|
|
||||||
|
|
||||||
// use std::collections::HashMap;
|
|
||||||
|
|
||||||
// use chrono::Utc;
|
|
||||||
// use tests::{
|
|
||||||
// logic::patch_metadata,
|
|
||||||
// structs::{SecretMeta, VersionMeta},
|
|
||||||
// };
|
|
||||||
|
|
||||||
// use super::*;
|
|
||||||
|
|
||||||
// #[test]
|
|
||||||
// #[cfg(target_feature = "_disabled")]
|
|
||||||
// fn print_serialized_test() {
|
|
||||||
// let temp_secret = TempSecret {
|
|
||||||
// content: String::from("Hallo"),
|
|
||||||
// version: 12,
|
|
||||||
// };
|
|
||||||
// let serialized = serialize_secret_json(&temp_secret);
|
|
||||||
// println!("string serialized: {:?}", serialized);
|
|
||||||
// let deserialized = deserialize_secret_struct(&serialized.unwrap());
|
|
||||||
// println!(
|
|
||||||
// "Struct field from deserialized: {}",
|
|
||||||
// deserialized.unwrap().content
|
|
||||||
// )
|
|
||||||
// }
|
|
||||||
// #[test]
|
|
||||||
// #[cfg(target_feature = "_disabled")]
|
|
||||||
// fn test_patching() {
|
|
||||||
// // TODO add more assertions
|
|
||||||
// let mut base = create_mock_meta();
|
|
||||||
// println!("OLD metadata: {:?}", base);
|
|
||||||
// let overwrite: SecretMeta = SecretMeta {
|
|
||||||
// max_versions: 10,
|
|
||||||
// versions: vec![VersionMeta {
|
|
||||||
// created_time: Utc::now(),
|
|
||||||
// deletion_time: Some(Utc::now()),
|
|
||||||
// destroyed: true,
|
|
||||||
// }],
|
|
||||||
// cas_required: true,
|
|
||||||
// delete_version_after: "10m".to_string(),
|
|
||||||
// current_version: 4,
|
|
||||||
// oldest_version: 2,
|
|
||||||
// updated_time: Utc::now(),
|
|
||||||
// created_time: Utc::now(),
|
|
||||||
// custom_metadata: Some(HashMap::new()),
|
|
||||||
// };
|
|
||||||
// let mut patched: Option<SecretMeta> = None;
|
|
||||||
// match patch_metadata(&mut base, &overwrite) {
|
|
||||||
// Ok(meta) => {
|
|
||||||
// println!("NEW metadata: {:?}", meta);
|
|
||||||
// println!("patched successfully");
|
|
||||||
// patched = Some(meta);
|
|
||||||
// }
|
|
||||||
// Err(e) => {
|
|
||||||
// log::error!("error patching metadata: {}", e);
|
|
||||||
// panic!("Patching failed");
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
|
|
||||||
// if let Some(patched_meta) = patched {
|
|
||||||
// assert!(patched_meta.current_version == 4);
|
|
||||||
// assert!(patched_meta.versions[0].destroyed == true);
|
|
||||||
// } else {
|
|
||||||
// panic!("patched was not initialized");
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
|
|
@ -1,7 +1,7 @@
|
||||||
use axum::Router;
|
use axum::Router;
|
||||||
|
|
||||||
use crate::storage::DatabaseDriver;
|
use crate::storage::DbPool;
|
||||||
|
|
||||||
/// Builds the (currently empty) identity engine router with the shared DB pool
/// attached as state.
pub fn identity_router(pool: DbPool) -> Router<DbPool> {
    Router::new().with_state(pool)
}
|
||||||
|
|
|
||||||
41
src/main.rs
41
src/main.rs
|
|
@ -1,18 +1,25 @@
|
||||||
|
#![forbid(unsafe_code)]
|
||||||
|
|
||||||
|
// // There are some placeholder functions, that will have to be implemented before the first release.
|
||||||
|
// // They are marked with `todo!()` to indicate that they need to be implemented.
|
||||||
|
// // We want to keep these functions in the codebase.
|
||||||
|
// // That is why we choose to suppress unused warnings for now.
|
||||||
|
// #![allow(unused)]
|
||||||
|
|
||||||
|
use crate::common::HttpError;
|
||||||
use axum::{
|
use axum::{
|
||||||
|
Router,
|
||||||
extract::Request,
|
extract::Request,
|
||||||
http::StatusCode,
|
http::StatusCode,
|
||||||
middleware::{self, Next},
|
middleware::{self, Next},
|
||||||
response::{IntoResponse, Response},
|
response::{IntoResponse, Response},
|
||||||
routing::get,
|
routing::get,
|
||||||
Router,
|
|
||||||
};
|
};
|
||||||
use log::*;
|
use log::*;
|
||||||
use std::{env, net::SocketAddr, str::FromStr};
|
use std::{env, net::SocketAddr, str::FromStr};
|
||||||
use storage::DatabaseDriver;
|
use storage::DbPool;
|
||||||
use tokio::{net::TcpListener, signal};
|
use tokio::{net::TcpListener, signal};
|
||||||
|
|
||||||
use crate::common::HttpError;
|
|
||||||
|
|
||||||
mod auth;
|
mod auth;
|
||||||
mod common;
|
mod common;
|
||||||
mod engines;
|
mod engines;
|
||||||
|
|
@ -22,13 +29,12 @@ mod sys;
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
// To be configured via environment variables
|
// NOTE: Rethink choice of environment variables in regards to security in the future
|
||||||
// choose from (highest to lowest): error, warn, info, debug, trace, off
|
let _ = dotenvy::dotenv();
|
||||||
env::set_var("RUST_LOG", "trace");
|
|
||||||
// env::set_var("DATABASE_URL", "sqlite:test.db"); // Format for the env var config. Consider moving to an .env file
|
|
||||||
env_logger::init();
|
env_logger::init();
|
||||||
|
|
||||||
// Listen on all IPv4 and IPv6 interfaces on port 8200 by default
|
// Listen on all IPv4 and IPv6 interfaces on port 8200 by default
|
||||||
let listen_addr = env::var("LISTEN_ADDR").unwrap_or("[::]:8200".to_string()); // Do not change
|
let listen_addr = env::var("LISTEN_ADDR").unwrap_or("[::]:8200".to_string());
|
||||||
let listen_addr = SocketAddr::from_str(&listen_addr).expect("Failed to parse LISTEN_ADDR");
|
let listen_addr = SocketAddr::from_str(&listen_addr).expect("Failed to parse LISTEN_ADDR");
|
||||||
|
|
||||||
let db_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
|
let db_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
|
||||||
|
|
@ -46,7 +52,13 @@ async fn main() {
|
||||||
.layer(middleware::from_fn(set_default_content_type_json))
|
.layer(middleware::from_fn(set_default_content_type_json))
|
||||||
.with_state(pool.clone());
|
.with_state(pool.clone());
|
||||||
|
|
||||||
warn!("Listening on {}", listen_addr.to_string());
|
if !storage::sealing::prepare_unseal(&pool).await {
|
||||||
|
storage::sealing::init_default(&pool).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
auth::token::create_root_token_if_none_exist(&pool).await;
|
||||||
|
|
||||||
|
warn!("Listening on {listen_addr}");
|
||||||
// Start listening
|
// Start listening
|
||||||
let listener = TcpListener::bind(listen_addr).await.unwrap();
|
let listener = TcpListener::bind(listen_addr).await.unwrap();
|
||||||
axum::serve(listener, app)
|
axum::serve(listener, app)
|
||||||
|
|
@ -55,20 +67,22 @@ async fn main() {
|
||||||
.unwrap();
|
.unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Middleware setting unspecified `Content-Type`s to json since this is done by client libraries.
|
||||||
|
/// Axum's [axum::extract::Json] rejects extraction attempts without json content type.
|
||||||
async fn set_default_content_type_json(
|
async fn set_default_content_type_json(
|
||||||
mut req: Request,
|
mut req: Request,
|
||||||
next: Next,
|
next: Next,
|
||||||
) -> Result<impl IntoResponse, Response> {
|
) -> Result<impl IntoResponse, Response> {
|
||||||
if req.headers().get("content-type").is_none() {
|
if req.headers().get("content-type").is_none() {
|
||||||
let headers = req.headers_mut();
|
let headers = req.headers_mut();
|
||||||
// debug!("Request header: \n{:?}", headers);
|
|
||||||
headers.insert("content-type", "application/json".parse().unwrap());
|
headers.insert("content-type", "application/json".parse().unwrap());
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(next.run(req).await)
|
Ok(next.run(req).await)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn shutdown_signal(pool: DatabaseDriver) {
|
async fn shutdown_signal(pool: DbPool) {
|
||||||
let ctrl_c = async {
|
let ctrl_c = async {
|
||||||
signal::ctrl_c()
|
signal::ctrl_c()
|
||||||
.await
|
.await
|
||||||
|
|
@ -95,6 +109,7 @@ async fn shutdown_signal(pool: DatabaseDriver) {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Fallback route for unknown routes
|
/// Fallback route for unknown routes
|
||||||
|
///
|
||||||
/// Note: `/v1/*` is handled by [`engines::secrets_router`]
|
/// Note: `/v1/*` is handled by [`engines::secrets_router`]
|
||||||
async fn fallback_route_unknown(req: Request) -> Response {
|
async fn fallback_route_unknown(req: Request) -> Response {
|
||||||
log::error!(
|
log::error!(
|
||||||
|
|
@ -107,7 +122,7 @@ async fn fallback_route_unknown(req: Request) -> Response {
|
||||||
HttpError::simple(StatusCode::NOT_FOUND, "Route not implemented")
|
HttpError::simple(StatusCode::NOT_FOUND, "Route not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
/// basic handler that responds with a static string
|
/// Basic handler that responds with a static string
|
||||||
async fn root() -> &'static str {
|
async fn root() -> &'static str {
|
||||||
info!("Hello world");
|
info!("Hello world");
|
||||||
"Hello, World!"
|
"Hello, World!"
|
||||||
|
|
|
||||||
|
|
@ -1,16 +1,26 @@
|
||||||
|
pub mod sealing;
|
||||||
|
|
||||||
use std::{fs::File, path::Path};
|
use std::{fs::File, path::Path};
|
||||||
|
|
||||||
use log::*;
|
use log::*;
|
||||||
use sqlx::{sqlite::SqlitePoolOptions, Pool, Sqlite};
|
use sqlx::{Pool, Sqlite, sqlite::SqlitePoolOptions};
|
||||||
|
|
||||||
pub(crate) type DatabaseDriver = Pool<Sqlite>;
|
pub(crate) type DbType = Sqlite;
|
||||||
|
pub(crate) type DbPool = Pool<DbType>;
|
||||||
|
|
||||||
pub async fn create_pool(db_url: String) -> DatabaseDriver {
|
/// Creates a SQLx SQLite database pool.
|
||||||
|
/// If nonexistent, it creates a new SQLite file.
|
||||||
|
///
|
||||||
|
/// Note: rvault uses compile-time queries.
|
||||||
|
/// Hence, during development a migrated SQLite file is required.
|
||||||
|
/// Use `cargo sqlx database reset` if required.
|
||||||
|
/// Otherwise, set the env var `SQLX_OFFLINE=true` during compilation (not helpful for development).
|
||||||
|
pub async fn create_pool(db_url: String) -> DbPool {
|
||||||
// Create SQLite database file if it does not exist
|
// Create SQLite database file if it does not exist
|
||||||
if db_url.starts_with("sqlite:") && db_url != ("sqlite::memory:") {
|
if db_url.starts_with("sqlite:") && db_url != ("sqlite::memory:") {
|
||||||
let path = db_url.replace("sqlite:", "");
|
let path = db_url.replace("sqlite:", "");
|
||||||
if !Path::new(&path).exists() {
|
if !Path::new(&path).exists() {
|
||||||
warn!("Sqlite database does not exist, creating file {}", path);
|
warn!("Sqlite database does not exist, creating file {path}");
|
||||||
File::create(&path).expect("Failed to create database file");
|
File::create(&path).expect("Failed to create database file");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
330
src/storage/sealing.rs
Normal file
330
src/storage/sealing.rs
Normal file
|
|
@ -0,0 +1,330 @@
|
||||||
|
#[cfg(feature = "shamir")]
|
||||||
|
pub mod shamir;
|
||||||
|
pub mod simple;
|
||||||
|
|
||||||
|
use aes_gcm_siv::{
|
||||||
|
AeadCore, Aes256GcmSiv, KeyInit,
|
||||||
|
aead::{Aead, OsRng},
|
||||||
|
};
|
||||||
|
use log::{error, info, warn};
|
||||||
|
use simple::SimpleSealing;
|
||||||
|
use tokio::sync::RwLock;
|
||||||
|
|
||||||
|
use super::DbPool;
|
||||||
|
|
||||||
|
#[derive(PartialEq)]
/// In-memory state of the root key, from sealed to fully unsealed.
enum KeyEnum {
    /// Final key
    MainKey(Vec<u8>),
    /// Encrypted with single secret (protected_rk, nonce)
    Simple(SimpleSealing),
    #[cfg(feature = "shamir")]
    // Shamir's Secret Sharing
    Shamir(shamir::ShamirBucket),
    /// Unknown or not initialized
    Uninitialized,
}
|
||||||
|
|
||||||
|
/// Common interface for root-key protection (sealing) schemes.
trait Sealing {
    /// Wraps the protected (encrypted) root key and its nonce as loaded from storage.
    fn new(protected_rk: Vec<u8>, nonce: Vec<u8>) -> Self;

    /// Feeds one unseal key; implementations report progress/completion via
    /// the returned [UnsealResult].
    async fn unseal(&mut self, key: String) -> UnsealResult;
}
|
||||||
|
|
||||||
|
/// Row shape of the `root_key` table: an encrypted root key plus how it is protected.
struct ProtectedRK {
    // Maps the `type` column; one of "dev_only", "simple", "shamir".
    pub protection_type: String,
    pub encrypted_key: Vec<u8>,
    // Present for all current protection types (dev_only stores an empty nonce).
    pub nonce: Option<Vec<u8>>,
}
|
||||||
|
|
||||||
|
static ROOT_KEY_MAYBE: RwLock<KeyEnum> = RwLock::const_new(KeyEnum::Uninitialized);
|
||||||
|
|
||||||
|
/// Returns `true` if vault is initialized or unsealed.
/// Returns `false` if uninitialized (nothing in the database).
pub async fn prepare_unseal(pool: &DbPool) -> bool {
    {
        // Fast path: a previous call already loaded the protected root key.
        if !matches!(*ROOT_KEY_MAYBE.read().await, KeyEnum::Uninitialized) {
            info!("Vault unseal is already prepared");
            return true;
        }
    }

    // NOTE(review): the write-lock future is created here but only awaited
    // after the DB query; between the read check above and acquiring this
    // lock another task could have populated the key, which would then be
    // overwritten below — confirm that is intended.
    let lock = ROOT_KEY_MAYBE.write(); // Not awaited just here

    // NOTE(review): `ORDER BY version` ascending selects the OLDEST key
    // version — confirm whether `DESC` (newest) was intended.
    let rk = sqlx::query_as!(
        ProtectedRK,
        "SELECT encrypted_key, type as protection_type, nonce FROM root_key ORDER BY version LIMIT 1"
    )
    .fetch_optional(pool)
    .await
    .expect("Failed to optionally read root key from the database");

    let v = match rk {
        Some(v) => v,
        None => {
            warn!("No root key was found in the database!");
            return false;
        }
    };
    info!(
        "Root key of type {} found in the database",
        v.protection_type
    );

    let mut lock = lock.await;
    // All currently supported types store a nonce (dev_only stores an empty
    // one), so this expect holds today; a future nonce-less scheme would panic here.
    let nonce = v.nonce.expect("Simple encryption but the nonce is missing");
    let res = match &*v.protection_type {
        #[cfg(feature = "insecure-dev-sealing")]
        "dev_only" => {
            warn!(
                "Root key is of type {}. This is INSECURE and must only be used for development purposes!",
                v.protection_type
            );
            // In dev mode the "encrypted" column holds the plain root key.
            KeyEnum::MainKey(v.encrypted_key)
        }
        #[cfg(not(feature = "insecure-dev-sealing"))]
        "dev_only" => panic!(
            r#"Database is insecure but "insecure-dev-sealing" is not enabled for this build!"#
        ),
        "simple" => KeyEnum::Simple(SimpleSealing::new(v.encrypted_key, nonce)),
        #[cfg(feature = "shamir")]
        "shamir" => KeyEnum::Shamir(shamir::ShamirBucket::new(v.encrypted_key, nonce)),
        #[cfg(not(feature = "shamir"))]
        "shamir" => panic!(r#"Feature "shamir" is not enabled for this build!"#),
        _ => panic!("Unknown root key type in database"),
    };
    *lock = res;
    true
}
|
||||||
|
|
||||||
|
/// Must NOT be used in production.
/// Token is plainly stored in the database and will be unsealed directly by [prepare_unseal]!
/// Danger!
#[cfg(feature = "insecure-dev-sealing")]
pub async fn init_insecure_in_db(pool: &DbPool) {
    // Generate a random 256-bit key and store it as a plain byte vector.
    let root_key = Aes256GcmSiv::generate_key(&mut OsRng);
    let root_key = root_key.as_slice().to_owned();

    warn!(
        "Danger: INSECURE! Generated root key is stored plainly in the database. Must ONLY be used for development!"
    );
    // The empty nonce satisfies the NOT NULL reader path; "dev_only" marks
    // the key as unencrypted.
    write_new_root_key(pool, root_key, "dev_only", Some(b"")).await;
}
|
||||||
|
|
||||||
|
/// Insert a brand-new root key row (version 1) into the `root_key` table.
///
/// * `protected_key` – the (possibly encrypted) root key bytes to persist.
/// * `type_to_be` – protection-scheme tag read back during unsealing
///   (visible call sites use "dev_only", "simple" and "shamir").
/// * `nonce` – AEAD nonce that protects the key, if any.
///
/// # Panics
/// Panics if the INSERT fails (e.g. database unreachable).
async fn write_new_root_key(
    pool: &DbPool,
    protected_key: Vec<u8>,
    type_to_be: &str,
    nonce: Option<&[u8]>,
) {
    // The query result (rows affected) is intentionally discarded; only success matters.
    let _ = sqlx::query!(
        "
        INSERT INTO root_key (encrypted_key, type, version, nonce)
        VALUES ($1, $2, 1, $3)
        ",
        protected_key,
        type_to_be,
        nonce
    )
    .execute(pool)
    .await
    .expect("Failed to write new root key to the database");

    info!("Initialized new root key!");
}
|
||||||
|
|
||||||
|
pub async fn reseal(pool: &DbPool) {
|
||||||
|
{
|
||||||
|
let mut lock = ROOT_KEY_MAYBE.write().await;
|
||||||
|
*lock = KeyEnum::Uninitialized;
|
||||||
|
}
|
||||||
|
prepare_unseal(pool).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
// pub async fn sealing_status() {
|
||||||
|
// let lock = ROOT_KEY_MAYBE.read().await;
|
||||||
|
// match &*lock {
|
||||||
|
// KeyEnum::MainKey(_) => todo!(),
|
||||||
|
// KeyEnum::Simple(_, _) => todo!(),
|
||||||
|
// KeyEnum::Uninitialized => todo!(),
|
||||||
|
// KeyEnum::Shamir(_, _) => todo!(),
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
|
||||||
|
pub async fn provide_key(key: String) -> UnsealResult {
|
||||||
|
// First, check if we need to write-lock at all
|
||||||
|
{
|
||||||
|
let read_lock = ROOT_KEY_MAYBE.read().await;
|
||||||
|
if matches!(*read_lock, KeyEnum::MainKey(_)) {
|
||||||
|
info!("Providing keys is useless since vault is already unlocked");
|
||||||
|
return UnsealResult::AlreadyDone;
|
||||||
|
} else if matches!(*read_lock, KeyEnum::Uninitialized) {
|
||||||
|
error!("Cannot process provided key when the vault is uninitialized");
|
||||||
|
return UnsealResult::Uninitialized;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A write lock is necessary.
|
||||||
|
let mut write_lock = ROOT_KEY_MAYBE.write().await;
|
||||||
|
let rk = match &mut *write_lock {
|
||||||
|
KeyEnum::MainKey(_) | KeyEnum::Uninitialized => {
|
||||||
|
unreachable!("Should have been checked above")
|
||||||
|
}
|
||||||
|
KeyEnum::Simple(simple) => simple.unseal(key).await,
|
||||||
|
#[cfg(feature = "shamir")]
|
||||||
|
KeyEnum::Shamir(shamir) => shamir.unseal(key).await,
|
||||||
|
};
|
||||||
|
let rk = match rk {
|
||||||
|
UnsealResult::DoneConfidential(rk) => rk,
|
||||||
|
UnsealResult::Done => unreachable!(),
|
||||||
|
reject_action => return reject_action,
|
||||||
|
};
|
||||||
|
*write_lock = KeyEnum::MainKey(rk);
|
||||||
|
|
||||||
|
info!("Unsealing done; Vault ready");
|
||||||
|
UnsealResult::Done
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A piece of data encrypted under the vault's root key, together with the
/// nonce used for that encryption.
pub struct Secret {
    /// 96-bit AEAD nonce used for this ciphertext.
    pub nonce: [u8; 12],
    /// Ciphertext produced by the cipher.
    pub protected_data: Vec<u8>,
}
|
||||||
|
|
||||||
|
impl Secret {
|
||||||
|
pub fn new<D, N>(data: D, nonce: N) -> Self
|
||||||
|
where
|
||||||
|
D: Into<Vec<u8>>,
|
||||||
|
N: AsRef<[u8]>,
|
||||||
|
{
|
||||||
|
let nonce_slice = nonce.as_ref();
|
||||||
|
assert!(
|
||||||
|
nonce_slice.len() == 12,
|
||||||
|
"Nonce must be exactly 12 bytes long"
|
||||||
|
);
|
||||||
|
|
||||||
|
let nonce: &[u8; 12] = nonce_slice.try_into().expect("Nonce must be 12 bytes long");
|
||||||
|
|
||||||
|
Self {
|
||||||
|
nonce: *nonce,
|
||||||
|
protected_data: data.into(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Encrypt a secret
|
||||||
|
///
|
||||||
|
/// # Errors
|
||||||
|
///
|
||||||
|
/// This function will return an error if the vault is uninitialized or an unknown error occurs.
|
||||||
|
pub async fn encrypt(data: &String) -> Result<Self, ()> {
|
||||||
|
let cipher = if let KeyEnum::MainKey(key) = &*ROOT_KEY_MAYBE.read().await {
|
||||||
|
match Aes256GcmSiv::new_from_slice(key) {
|
||||||
|
Ok(v) => v,
|
||||||
|
Err(e) => {
|
||||||
|
error!("Failed to create new AesGcmSiv cipher from variable size key: {e}");
|
||||||
|
return Err(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
error!("Cannot encrypt secret since the vault is not unsealed");
|
||||||
|
return Err(());
|
||||||
|
};
|
||||||
|
|
||||||
|
let nonce: aes_gcm_siv::aead::generic_array::GenericArray<
|
||||||
|
u8,
|
||||||
|
<Aes256GcmSiv as aes_gcm_siv::AeadCore>::NonceSize,
|
||||||
|
> = Aes256GcmSiv::generate_nonce(&mut OsRng); // 96-bits; unique per message
|
||||||
|
let enc = match cipher.encrypt(&nonce, data.as_bytes()) {
|
||||||
|
Ok(v) => v,
|
||||||
|
Err(e) => {
|
||||||
|
error!("Failed to encrypt secret with cipher: {e}");
|
||||||
|
return Err(());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
debug_assert!(nonce.len() == 12, "Nonce should be exactly 12 bytes");
|
||||||
|
let nonce = match nonce.as_slice().try_into() {
|
||||||
|
Ok(v) => v,
|
||||||
|
Err(e) => {
|
||||||
|
error!("Nonce should be exactly 12 bytes: {e}");
|
||||||
|
return Err(());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
Ok(Self {
|
||||||
|
nonce,
|
||||||
|
protected_data: enc,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn decrypt_bytes(self) -> Result<Vec<u8>, ()> {
|
||||||
|
assert!(self.nonce.len() == 12);
|
||||||
|
let cipher = match &*ROOT_KEY_MAYBE.read().await {
|
||||||
|
KeyEnum::MainKey(key) => Aes256GcmSiv::new_from_slice(key),
|
||||||
|
_ => panic!("Cannot seal secret since the vault is not unsealed"),
|
||||||
|
}
|
||||||
|
.expect("Failed to create new AesGcmSiv cipher from variable size key");
|
||||||
|
|
||||||
|
let nonce = aes_gcm_siv::aead::generic_array::GenericArray::from_slice(&self.nonce);
|
||||||
|
let enc = match cipher.decrypt(nonce, self.protected_data.as_ref()) {
|
||||||
|
Ok(v) => v,
|
||||||
|
Err(e) => {
|
||||||
|
error!("Failed to decrypt secret with given nonce and cipher: {e}");
|
||||||
|
return Err(());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
Ok(enc)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn decrypt(self) -> Result<String, ()> {
|
||||||
|
String::from_utf8(self.decrypt_bytes().await?).map_err(|e| {
|
||||||
|
error!("Failed to parse secret as UTF8: {e}");
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Outcome of an unseal-related operation.
pub enum UnsealResult {
    /// Unsealing finished, with root key hidden
    Done,
    /// Was already unsealed, no action taken
    AlreadyDone,
    /// Could not unseal as the vault is uninitialized
    Uninitialized,

    /// Unsealing finished, returns root key
    /// (internal: only used between the sealing backend and [provide_key]).
    DoneConfidential(Vec<u8>),
    /// Unsealing attempt has been recorded but is not sufficient
    Unfinished,
    /// The provided or the set of previously provided portions are invalid.
    /// Unsealing has been reset.
    InvalidReset,
    /// Duplicate share
    Duplicate,
    /// Error processing share, invalid
    InvalidRejected,
}
|
||||||
|
|
||||||
|
pub async fn init_default(pool: &DbPool) {
|
||||||
|
#[cfg(feature = "insecure-dev-sealing")]
|
||||||
|
let user_key = {
|
||||||
|
storage::sealing::init_insecure_in_db(&pool).await;
|
||||||
|
"INSECURE automatic unlock - TESTING ONLY"
|
||||||
|
};
|
||||||
|
|
||||||
|
#[cfg(not(feature = "insecure-dev-sealing"))]
|
||||||
|
let user_key = {
|
||||||
|
#[cfg(not(feature = "shamir"))]
|
||||||
|
{
|
||||||
|
simple::init_simple(&pool).await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "shamir")]
|
||||||
|
{
|
||||||
|
shamir::init_shamir(pool, 2, 5).await
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let success = prepare_unseal(pool).await;
|
||||||
|
warn!("New sealing password generated: {user_key:?}");
|
||||||
|
assert!(
|
||||||
|
success,
|
||||||
|
"Vault ought to have been initialized just now but it is not."
|
||||||
|
);
|
||||||
|
}
|
||||||
225
src/storage/sealing/shamir.rs
Normal file
225
src/storage/sealing/shamir.rs
Normal file
|
|
@ -0,0 +1,225 @@
|
||||||
|
use aes_gcm_siv::{
|
||||||
|
AeadCore, Aes256GcmSiv, KeyInit,
|
||||||
|
aead::{Aead, OsRng, generic_array::GenericArray},
|
||||||
|
};
|
||||||
|
use base64::{Engine, prelude::BASE64_STANDARD};
|
||||||
|
use log::{error, info, warn};
|
||||||
|
use p256::{NonZeroScalar, Scalar, SecretKey};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::json;
|
||||||
|
use vsss_rs::{
|
||||||
|
DefaultShare, Error as VsssErr, IdentifierPrimeField, ReadableShareSet, ShareElement,
|
||||||
|
ValuePrimeField,
|
||||||
|
};
|
||||||
|
use zeroize::ZeroizeOnDrop;
|
||||||
|
|
||||||
|
use crate::DbPool;
|
||||||
|
|
||||||
|
use super::{Sealing, UnsealResult, write_new_root_key};
|
||||||
|
|
||||||
|
type P256Share = DefaultShare<IdentifierPrimeField<Scalar>, IdentifierPrimeField<Scalar>>;
|
||||||
|
|
||||||
|
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize, ZeroizeOnDrop)]
/// Differs from [P256Share] by storing the raw byte representation of the
/// field elements, which makes it (de)serializable with short JSON keys.
struct ShamirPortion {
    /// Share identifier (x-coordinate) as raw scalar bytes.
    #[serde(rename = "i")]
    pub identifier: Vec<u8>,
    /// Share value (y-coordinate) as raw scalar bytes.
    #[serde(rename = "v")]
    pub value: Vec<u8>,
}
|
||||||
|
|
||||||
|
#[derive(PartialEq)]
/// Container for multiple [ShamirPortion]s and the protected root key.
/// Multiple instances could exist in the future for per-namespace encryption.
pub struct ShamirBucket {
    /// Shares collected so far during the current unseal attempt.
    portions: Vec<ShamirPortion>,
    /// Root key, encrypted under the secret reconstructed from the shares.
    protected_rk: Vec<u8>,
    /// 96-bit AEAD nonce used when the root key was encrypted.
    nonce: Vec<u8>,
}
|
||||||
|
|
||||||
|
impl Sealing for ShamirBucket {
    /// Build a bucket around the protected root key; starts with no shares.
    fn new(protected_rk: Vec<u8>, nonce: Vec<u8>) -> Self {
        Self {
            // Capacity 2 matches the smallest useful threshold.
            portions: Vec::with_capacity(2),
            protected_rk,
            nonce,
        }
    }

    /// Record one Base64-encoded JSON share and, once enough shares are
    /// present, attempt to reconstruct the user key and decrypt the root key.
    async fn unseal(&mut self, key: String) -> UnsealResult {
        // Decode and parse the share; malformed input is rejected without
        // being recorded.
        let key = match BASE64_STANDARD.decode(key) {
            Ok(v) => v,
            Err(e) => {
                warn!("Portion could not be decoded: {e}");
                return UnsealResult::InvalidRejected;
            }
        };
        let key_portion: ShamirPortion = match serde_json::from_slice(&key) {
            Ok(v) => v,
            Err(e) => {
                info!("Portion could not be parsed: {e}");
                return UnsealResult::InvalidRejected;
            }
        };

        // Duplicates are ignored so a repeated share cannot fake progress.
        if self.portions.contains(&key_portion) {
            warn!("The supplied Shamir portion is already known. Duplication ignored.");
            return UnsealResult::Duplicate;
        }
        self.portions.push(key_portion);

        // Try to combine every share collected so far into the user key.
        let joined_keys = match join_keys(&self.portions) {
            Ok(v) => v,
            Err(e) => {
                return match e {
                    VsssErr::SharingMinThreshold => {
                        info!("Shamir portion provided. Sharing threshold not reached.");
                        UnsealResult::Unfinished
                    },
                    VsssErr::SharingDuplicateIdentifier => unreachable!("Addition of duplicate keys should have been prevented by not recording them"),
                    e => {
                        error!("Unknown error occurred upon joining keys {e:?}");
                        unreachable!()
                    },
                };
            }
        }
        .to_bytes();

        // The combined secret is used as an AES-256-GCM-SIV key to decrypt
        // the stored root key; successful decryption is the only validation.
        let cipher = match Aes256GcmSiv::new_from_slice(&joined_keys) {
            Ok(v) => v,
            Err(e) => {
                info!("Cipher could not be created from slice: {e}");
                return UnsealResult::InvalidRejected;
            }
        };
        debug_assert_eq!(self.nonce.len(), 12);
        let nonce = aes_gcm_siv::aead::generic_array::GenericArray::from_slice(&self.nonce);
        let root_key = cipher.decrypt(nonce, self.protected_rk.as_ref());
        match root_key {
            Ok(v) => UnsealResult::DoneConfidential(v),
            Err(_) => {
                // Err is opaque on purpose
                self.portions.clear();
                warn!(
                    "Enough shares have been provided but the set of shares is invalid. The set of shares has been reset."
                );
                UnsealResult::InvalidReset
            }
        }
    }
}
|
||||||
|
|
||||||
|
/// Shamir Secret Sharing does not verify a portion for validity,
|
||||||
|
/// unlike Feldman Verified Secret Sharing, which is built on Shamir.
|
||||||
|
/// "Validation" happens by attempting to decrypt the root key.
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
/// List of encoded key portions
|
||||||
|
pub async fn init_shamir(pool: &DbPool, threshold: usize, limit: usize) -> Vec<String> {
|
||||||
|
let root_key = Aes256GcmSiv::generate_key(&mut OsRng);
|
||||||
|
let nonce: GenericArray<u8, <Aes256GcmSiv as AeadCore>::NonceSize> =
|
||||||
|
Aes256GcmSiv::generate_nonce(&mut OsRng); // 96-bits; unique per message
|
||||||
|
let root_key = root_key.as_slice().to_owned();
|
||||||
|
|
||||||
|
let (user_key, protected_rk) = {
|
||||||
|
let key = Aes256GcmSiv::generate_key(&mut OsRng);
|
||||||
|
let cipher = Aes256GcmSiv::new(&key);
|
||||||
|
let nonce: &[u8] = nonce.as_slice();
|
||||||
|
debug_assert_eq!(nonce.len(), 12);
|
||||||
|
let nonce = aes_gcm_siv::aead::generic_array::GenericArray::from_slice(nonce);
|
||||||
|
let enc = cipher.encrypt(nonce, root_key.as_slice()).unwrap();
|
||||||
|
(key, enc)
|
||||||
|
};
|
||||||
|
|
||||||
|
let portions = share_keys(&mut OsRng, threshold, limit, &user_key);
|
||||||
|
|
||||||
|
log::debug!("Shared Keys: {portions:?}");
|
||||||
|
|
||||||
|
write_new_root_key(pool, protected_rk, "shamir", Some(nonce.as_slice())).await;
|
||||||
|
portions
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a Vec of Base64 encoded JSON-wrapped identifier-value pairs
|
||||||
|
fn share_keys(
|
||||||
|
mut osrng: &mut OsRng,
|
||||||
|
threshold: usize,
|
||||||
|
limit: usize,
|
||||||
|
root_key: &[u8],
|
||||||
|
) -> Vec<String> {
|
||||||
|
log::debug!("RK: {root_key:?}");
|
||||||
|
assert!(
|
||||||
|
threshold <= limit,
|
||||||
|
"Threshold cannot be higher than the number of shares (limit)"
|
||||||
|
);
|
||||||
|
|
||||||
|
let rk_array = GenericArray::from_slice(root_key);
|
||||||
|
let rk_scalar = NonZeroScalar::from_repr(*rk_array).unwrap();
|
||||||
|
let shared_secret = IdentifierPrimeField(*rk_scalar.as_ref());
|
||||||
|
let res =
|
||||||
|
vsss_rs::shamir::split_secret::<P256Share>(threshold, limit, &shared_secret, &mut osrng);
|
||||||
|
|
||||||
|
res.unwrap()
|
||||||
|
.iter()
|
||||||
|
.map(|f| {
|
||||||
|
BASE64_STANDARD.encode(
|
||||||
|
json!(ShamirPortion {
|
||||||
|
identifier: f.identifier.to_vec(),
|
||||||
|
value: f.value.to_vec(),
|
||||||
|
})
|
||||||
|
.to_string(),
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn join_keys(shares: &[ShamirPortion]) -> Result<SecretKey, vsss_rs::Error> {
|
||||||
|
let shares: Vec<P256Share> = shares
|
||||||
|
.iter()
|
||||||
|
.map(|portion| {
|
||||||
|
let identifier = IdentifierPrimeField::<Scalar>::from_slice(&portion.identifier)
|
||||||
|
.map_err(|e| {
|
||||||
|
info!("Portion could not be converted to IdentifierPrimeField: {e}");
|
||||||
|
VsssErr::InvalidShare
|
||||||
|
})?;
|
||||||
|
let value = ValuePrimeField::<Scalar>::from_slice(&portion.value).map_err(|e| {
|
||||||
|
info!("Portion could not be converted to ValuePrimeField: {e}");
|
||||||
|
VsssErr::InvalidShare
|
||||||
|
})?;
|
||||||
|
Ok(P256Share { identifier, value })
|
||||||
|
})
|
||||||
|
.collect::<Result<_, VsssErr>>()?;
|
||||||
|
|
||||||
|
let scalar = shares.combine()?;
|
||||||
|
// A little suboptimal thanks to CtOption
|
||||||
|
let nzs = match NonZeroScalar::from_repr(scalar.0.into()).into_option() {
|
||||||
|
Some(v) => v,
|
||||||
|
None => return Err(VsssErr::InvalidShare),
|
||||||
|
};
|
||||||
|
let sk = SecretKey::from(nzs);
|
||||||
|
Ok(sk)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Round-trip test: splitting a key into shares and recombining them must
/// reproduce the original key exactly.
#[test]
fn split_and_join() {
    let root_key = Aes256GcmSiv::generate_key(&mut OsRng);
    let root_key = root_key.as_slice().to_owned();
    // 2-of-5 split; all five shares are fed back in below.
    let kps = share_keys(&mut OsRng, 2, 5, &root_key);

    // Undo the Base64 + JSON wrapping that share_keys applies.
    let kps: Vec<_> = kps
        .iter()
        .map(|f| {
            let b = BASE64_STANDARD
                .decode(f)
                .expect("A portion could not be decoded from BASE64");
            serde_json::from_slice(&b).expect("A portion could not be parsed as a key pair")
        })
        .collect();
    let k = join_keys(&kps).expect("Error on joining key pairs");

    assert_eq!(
        root_key,
        k.to_bytes().as_slice(),
        "Original key and re-combined key from shares are not equal"
    );
}
|
||||||
48
src/storage/sealing/simple.rs
Normal file
48
src/storage/sealing/simple.rs
Normal file
|
|
@ -0,0 +1,48 @@
|
||||||
|
use aes_gcm_siv::{
|
||||||
|
AeadCore, Aes256GcmSiv, KeyInit,
|
||||||
|
aead::{Aead, OsRng, generic_array::GenericArray},
|
||||||
|
};
|
||||||
|
use base64::{Engine, prelude::BASE64_STANDARD};
|
||||||
|
|
||||||
|
use crate::DbPool;
|
||||||
|
|
||||||
|
use super::{Sealing, UnsealResult, write_new_root_key};
|
||||||
|
|
||||||
|
/// Pair of protected root key and nonce
///
/// Field 0 holds the root key encrypted under the user's key; field 1 holds
/// the 12-byte AEAD nonce used for that encryption.
#[derive(PartialEq)]
pub struct SimpleSealing(Vec<u8>, Vec<u8>);
|
||||||
|
|
||||||
|
impl Sealing for SimpleSealing {
|
||||||
|
fn new(protected_rk: Vec<u8>, nonce: Vec<u8>) -> Self {
|
||||||
|
Self(protected_rk, nonce)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn unseal(&mut self, key: String) -> UnsealResult {
|
||||||
|
let key = BASE64_STANDARD.decode(key).unwrap();
|
||||||
|
let cipher = Aes256GcmSiv::new_from_slice(&key).unwrap();
|
||||||
|
debug_assert_eq!(self.1.len(), 12);
|
||||||
|
let nonce = aes_gcm_siv::aead::generic_array::GenericArray::from_slice(self.1.as_slice());
|
||||||
|
UnsealResult::DoneConfidential(cipher.decrypt(nonce, self.0.as_ref()).unwrap())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Initialize the vault with a simple password
///
/// Generates a root key, encrypts it under a random user key, persists the
/// encrypted root key, and returns the user key Base64-encoded.
#[allow(unused)]
pub async fn init_simple(pool: &DbPool) -> String {
    // Fresh root key plus a fresh nonce for protecting it.
    let root_key = Aes256GcmSiv::generate_key(&mut OsRng);
    let nonce: GenericArray<u8, <Aes256GcmSiv as AeadCore>::NonceSize> =
        Aes256GcmSiv::generate_nonce(&mut OsRng); // 96-bits; unique per message
    let root_key = root_key.as_slice().to_owned();

    // Only the encrypted root key is persisted; the user key is handed back
    // to the operator and never stored.
    let (user_key, protected_rk) = {
        let key = Aes256GcmSiv::generate_key(&mut OsRng);
        let cipher = Aes256GcmSiv::new(&key);
        let nonce: &[u8] = nonce.as_slice();
        debug_assert_eq!(nonce.len(), 12);
        let nonce = aes_gcm_siv::aead::generic_array::GenericArray::from_slice(nonce);
        let enc = cipher.encrypt(nonce, root_key.as_slice()).unwrap();
        (key, enc)
    };
    write_new_root_key(pool, protected_rk, "simple", Some(nonce.as_slice())).await;
    BASE64_STANDARD.encode(user_key)
}
|
||||||
16
src/sys.rs
16
src/sys.rs
|
|
@ -1,8 +1,16 @@
|
||||||
use axum::Router;
|
mod root_generation;
|
||||||
|
mod sealing;
|
||||||
|
|
||||||
use crate::storage::DatabaseDriver;
|
use axum::Router;
|
||||||
|
use root_generation::root_generation;
|
||||||
|
use sealing::sealing_routes;
|
||||||
|
|
||||||
|
use crate::storage::DbPool;
|
||||||
|
|
||||||
/// System routes
|
/// System routes
|
||||||
pub fn sys_router(pool: DatabaseDriver) -> Router<DatabaseDriver> {
|
pub fn sys_router(pool: DbPool) -> Router<DbPool> {
|
||||||
Router::new().with_state(pool)
|
Router::new()
|
||||||
|
.merge(sealing_routes())
|
||||||
|
.merge(root_generation())
|
||||||
|
.with_state(pool)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
14
src/sys/root_generation.rs
Normal file
14
src/sys/root_generation.rs
Normal file
|
|
@ -0,0 +1,14 @@
|
||||||
|
use axum::{Router, routing::post};
|
||||||
|
|
||||||
|
use crate::DbPool;
|
||||||
|
|
||||||
|
/// Router for root-token generation endpoints.
pub fn root_generation() -> Router<DbPool> {
    Router::new()
        // .route("/generate-root", get(get_root_generation_attempt))
        // .route("/generate-root", delete(cancel_generate_root))
        .route("/generate-root", post(generate_new_root))
}
|
||||||
|
|
||||||
|
/// POST /generate-root — not implemented yet.
async fn generate_new_root() {
    todo!()
}
|
||||||
54
src/sys/sealing.rs
Normal file
54
src/sys/sealing.rs
Normal file
|
|
@ -0,0 +1,54 @@
|
||||||
|
use axum::{
|
||||||
|
Json, Router,
|
||||||
|
extract::State,
|
||||||
|
routing::{get, post, put},
|
||||||
|
};
|
||||||
|
use log::warn;
|
||||||
|
use serde::Deserialize;
|
||||||
|
|
||||||
|
use crate::storage::{DbPool, sealing};
|
||||||
|
|
||||||
|
/// Router for sealing, unsealing and seal-status endpoints.
pub fn sealing_routes() -> Router<DbPool> {
    Router::new()
        .route("/seal", post(seal_post))
        .route("/seal-status", get(seal_status_get))
        .route("/unseal", post(unseal_post))
        // NOTE(review): clients apparently call /unseal with PUT even though
        // POST would be the natural verb; both are accepted for compatibility.
        .route("/unseal", put(unseal_post))
}
|
||||||
|
|
||||||
|
/// POST /seal — discard the in-memory root key and reseal the vault.
async fn seal_post(State(pool): State<DbPool>) {
    sealing::reseal(&pool).await;
}
|
||||||
|
|
||||||
|
/// JSON body accepted by the unseal endpoint.
#[derive(Deserialize)]
struct UnsealRequest {
    /// Required, unless `reset` is true
    pub key: Option<String>,
    /// Specifies if previously-provided unseal keys are discarded and the unseal process is reset.
    #[serde(default)]
    pub reset: bool,
    // #[serde(default)]
    // /// Used to migrate the seal from shamir to autoseal or autoseal to shamir. Must be provided on all unseal key calls.
    // pub migrate: bool,
}
|
||||||
|
|
||||||
|
/// POST/PUT /unseal — optionally reset unseal progress, then feed one key in.
///
/// # Errors
/// Returns `Err(())` (bad request) when neither a key nor a reset was given.
async fn unseal_post(State(pool): State<DbPool>, Json(req): Json<UnsealRequest>) -> Result<(), ()> {
    if req.reset {
        warn!("Unsealing progress has been reset on unseal request");
        sealing::reseal(&pool).await;
    }

    if let Some(key) = req.key {
        // NOTE(review): the UnsealResult is discarded, so an invalid key still
        // yields a success response — confirm whether failures should be
        // surfaced to the client.
        sealing::provide_key(key).await;
    } else if !req.reset {
        // No request key nor reset = bad request
        return Err(());
    }

    Ok(())
}
|
||||||
|
|
||||||
|
/// GET /seal-status — not implemented yet.
async fn seal_status_get(State(_pool): State<DbPool>) -> &'static str {
    todo!("not implemented")
}
|
||||||
Loading…
Reference in a new issue