a working version of my own ACME client based on acme2-eab

This commit is contained in:
Sebastian Tobie 2025-05-11 20:57:24 +02:00
Commit c02b74ba69
15 changed files with 4291 additions and 0 deletions

9
.cargo/config.toml Normal file

@@ -0,0 +1,9 @@
[env]
RUST_LOG="TRACE"
#RUST_LOG="DEBUG"
SSL_CERT_DIR="/etc/ca-certificates/extracted/cadir/"
SSL_CERT_FILE="/etc/ca-certificates/extracted/tls-ca-bundle.pem"
[target]
[target.'cfg(debug_assertions)']
runner = "strace -e trace=open,openat -P /etc/*"

3
.gitignore Vendored Normal file

@@ -0,0 +1,3 @@
/target
/test
/racme.toml

54
.pre-commit-config.yaml Normal file

@@ -0,0 +1,54 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-added-large-files
      - id: check-executables-have-shebangs
      - id: check-json
      - id: check-toml
      - id: check-shebang-scripts-are-executable
      - id: check-symlinks
      - id: check-vcs-permalinks
      - id: check-yaml
      - id: detect-private-key
      - id: fix-byte-order-marker
      - id: forbid-submodules
      - id: mixed-line-ending
        args:
          - --fix=lf
      - id: trailing-whitespace
      - id: destroyed-symlinks
  - repo: local
    hooks:
      - id: fmt
        name: fmt
        description: Format files with cargo fmt.
        entry: cargo +nightly fmt
        language: system
        types:
          - rust
        args:
          - --
      - id: cargo-check
        name: cargo check
        description: Check the package for errors.
        entry: cargo check
        language: system
        types:
          - rust
        pass_filenames: false
      - id: clippy
        name: clippy
        description: Lint rust sources
        entry: cargo +nightly clippy
        language: system
        args:
          - --
          - -D
          - warnings
        types:
          - rust
        pass_filenames: false

2903
Cargo.lock Generated Normal file

File diff suppressed because it is too large

78
Cargo.toml Normal file

@@ -0,0 +1,78 @@
[package]
name = "racme"
version = "0.1.0"
edition = "2024"
resolver = "3"
[features]
unstable = []
[dependencies]
data-encoding = { version = "2.9.0", default-features = false, features = ["alloc", "std"] }
env_logger = "0.11.8"
lazy_static = "1.5.0"
toml = "0.8.20"
[dependencies.macro_rules_attribute]
version = "0.2.0"
default-features = false
[dependencies.acme2-eab]
version = "0"
default-features = false
[dependencies.openssl]
version = "0.10.72"
default-features = false
[dependencies.pem]
version = "3.0.5"
default-features = false
features = ["serde", "std"]
[dependencies.tokio-stream]
version = "0.1.17"
default-features = false
features = ["fs"]
[dependencies.async-scoped]
version = "0.9.0"
default-features = false
features = ["use-tokio"]
[dependencies.libsystemd]
version = "0.7.0"
default-features = false
[dependencies.zbus_systemd]
version = "0.25701.0"
default-features = false
features = ["systemd1"]
[dependencies.log]
version = "0.4.27"
features = ["std"]
[dependencies.serde]
version = "1.0.219"
features = ["derive", "std"]
default-features = false
[dependencies.clap]
version = "4.5.36"
features = ["derive"]
[dependencies.tokio]
version = "1.44.2"
default-features = false
features = ["rt", "sync", "time", "net"]
[dependencies.reqwest]
version = "0.12.15"
default-features = false
features = ["rustls-tls-native-roots-no-provider", "default-tls"]
[patch.crates-io.acme2-eab]
path = "../acme2-eab"
[workspace]

21
LICENSE Normal file

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2025 [fullname]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

0
README.md Normal file

38
rustfmt.toml Normal file

@@ -0,0 +1,38 @@
array_width = 0
attr_fn_like_width = 80
binop_separator = "Back"
blank_lines_lower_bound = 0
blank_lines_upper_bound = 2
brace_style = "SameLineWhere"
chain_width = 144
combine_control_expr = false
comment_width = 160
condense_wildcard_suffixes = true
empty_item_single_line = true
enum_discrim_align_threshold = 20
fn_params_layout = "Tall"
force_explicit_abi = true
force_multiline_blocks = true
format_code_in_doc_comments = true
format_macro_bodies = true
hard_tabs = false
hex_literal_case = "Upper"
imports_granularity = "Crate"
imports_layout = "Vertical"
indent_style = "Block"
inline_attribute_width = 0
match_block_trailing_comma = true
max_width = 160
merge_derives = true
newline_style = "Unix"
remove_nested_parens = true
reorder_impl_items = true
reorder_imports = true
reorder_modules = true
single_line_if_else_max_width = 0
single_line_let_else_max_width = 0
style_edition = "2024"
tab_spaces = 4
use_field_init_shorthand = true
use_small_heuristics = "Off"
use_try_shorthand = true

253
src/config.rs Normal file

@@ -0,0 +1,253 @@
use crate::{
consts::{
BRAINPOOL_MIDDLE,
BRAINPOOL_STRONG,
BRAINPOOL_WEAK,
SECP_MIDDLE,
SECP_STRONG,
SECP_WEAK,
},
macros::DefDer,
match_error,
structs::Error,
};
use log::*;
use macro_rules_attribute::macro_rules_derive;
use openssl::pkey::{
Id,
PKey,
Private,
};
use serde::{
Deserialize,
de::DeserializeOwned,
};
use std::{
collections::HashMap,
net::IpAddr,
};
use tokio::{
fs::File,
io::AsyncReadExt,
};
type VString = Vec<String>;
#[macro_rules_derive(DefDer)]
#[derive(Deserialize)]
pub struct GeneralConfig {
#[serde(default = "GeneralConfig::default_accounts")]
pub accounts_path: String,
#[serde(default = "GeneralConfig::default_sites")]
pub sites_path: String,
#[serde(default = "GeneralConfig::default_challenge")]
pub http_challenge_path: Option<String>,
#[serde(default = "GeneralConfig::default_dns")]
pub dns: Option<Dns>,
#[serde(default = "GeneralConfig::default_certificates")]
pub certificates_path: String,
#[serde(default = "GeneralConfig::default_cas")]
pub ca: HashMap<String, CA>,
}
impl Default for GeneralConfig {
#[inline]
fn default() -> Self {
Self {
accounts_path: Self::default_accounts(),
sites_path: Self::default_sites(),
http_challenge_path: Self::default_challenge(),
dns: Self::default_dns(),
certificates_path: Self::default_certificates(),
ca: Self::default_cas(),
}
}
}
impl GeneralConfig {
#[inline]
fn default_accounts() -> String {
"accounts".into()
}
#[inline]
fn default_sites() -> String {
"sites".into()
}
#[inline]
fn default_challenge() -> Option<String> {
None
}
#[inline]
fn default_dns() -> Option<Dns> {
None
}
#[inline]
fn default_cas() -> HashMap<String, CA> {
HashMap::new()
}
#[inline]
fn default_certificates() -> String {
"certificates".into()
}
}
#[macro_rules_derive(DefDer)]
#[derive(Deserialize)]
pub struct Dns;
#[macro_rules_derive(DefDer)]
#[derive(Deserialize)]
pub struct Eab {
#[serde(rename = "eab_token", alias = "id")]
pub token: String,
#[serde(rename = "eab_key", alias = "key")]
pub key: String,
}
impl Eab {
pub fn key(&self) -> Result<PKey<Private>, Error> {
let decoded = &match_error!(data_encoding::BASE64URL_NOPAD.decode(self.key.as_bytes())=>Err(error)-> "Failed to decode the HMAC key for the eab_key: {error}", Error::err("Failed to decode eab_key".into()));
PKey::hmac(decoded).map_err(|error| Error::new(format!("Failed to parse the private key: {error}")))
}
}
#[macro_rules_derive(DefDer)]
#[derive(Deserialize)]
pub struct CA {
/// URL of the ACME directory
pub directory: String,
/// Email addresses the CA can use to contact the user
pub email_addresses: Option<VString>,
#[serde(flatten, default)]
pub eab: Option<Eab>,
/// Number of days before expiry at which the certificate is renewed
/// TODO: give to processor
#[serde(default = "CA::default_renew")]
pub renew_before: u32,
#[serde(default)]
pub tos_accepted: bool,
}
impl CA {
fn default_renew() -> u32 {
7
}
}
#[macro_rules_derive(DefDer)]
#[derive(Copy, Deserialize, Default)]
pub enum Algorithm {
Rsa,
Brainpool,
Secp,
#[default]
ED25519,
}
#[macro_rules_derive(DefDer)]
#[derive(Copy, Deserialize, Default)]
pub enum Strength {
Weak,
Middle,
#[default]
Strong,
}
impl Strength {
    pub fn rsabits(self) -> u32 {
        // Map the configured strength to an RSA modulus size in bits (matches consts::RsaStrength);
        // `self as u32` would only yield the enum discriminant (0, 1 or 2), which is not a valid key size.
        match self {
            Self::Weak => 1024,
            Self::Middle => 2048,
            Self::Strong => 4096,
        }
    }
}
pub fn match_algo(key: &PKey<Private>, algorithm: Algorithm, strength: Strength) -> bool {
match (key.id(), algorithm) {
(Id::ED25519, Algorithm::ED25519) => true,
(Id::RSA, Algorithm::Rsa) if key.bits() == strength.rsabits() => true,
(Id::EC, Algorithm::Secp) | (Id::EC, Algorithm::Brainpool) => {
let pkey = key.ec_key().unwrap();
let curve = pkey.group().curve_name().unwrap();
match (algorithm, strength) {
(Algorithm::Secp, Strength::Weak) if SECP_WEAK == curve => true,
(Algorithm::Secp, Strength::Middle) if SECP_MIDDLE == curve => true,
(Algorithm::Secp, Strength::Strong) if SECP_STRONG == curve => true,
(Algorithm::Brainpool, Strength::Weak) if BRAINPOOL_WEAK == curve => true,
(Algorithm::Brainpool, Strength::Middle) if BRAINPOOL_MIDDLE == curve => true,
(Algorithm::Brainpool, Strength::Strong) if BRAINPOOL_STRONG == curve => true,
_ => false,
}
},
_ => false,
}
}
#[macro_rules_derive(DefDer)]
#[derive(Deserialize, Default)]
pub struct SiteConfig {
/// The Configured Certificate Authority
pub ca: String,
/// The Domains this site is responsible for
pub domains: VString,
/// IP addresses for the Certificate
#[serde(default)]
pub addresses: Vec<IpAddr>,
/// Email addresses that this Certificate is valid for
#[serde(default)]
pub emails: VString,
/// The systemd services that are reloaded after the certificate has been renewed
#[serde(default)]
pub reload_services: VString,
/// The systemd services that have to be restarted to pick up the new certificates
#[serde(default)]
pub restart_services: VString,
/// Commands that have to be run after the certificates have been issued if they don't have a systemd service
#[serde(default)]
pub trigger_commands: VString,
/// The Algorithm for the Private Key
#[serde(default)]
pub algorithm: Algorithm,
/// The Strength of the Private key.
#[serde(default)]
pub strength: Strength,
#[serde(skip)]
pub name: String,
}
pub async fn read_config<T: Default + DeserializeOwned>(mut file: File) -> T {
let mut data = String::new();
match file.read_to_string(&mut data).await {
Ok(_) => {},
Err(error) => {
warn!("Failed to load config: {error}");
return Default::default();
},
}
match toml::from_str(&data) {
Ok(output) => output,
Err(error) => {
warn!("Failed to parse toml file: {error}");
Default::default()
},
}
}
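
Editor's sketch (not part of the commit): how the structs above map onto TOML on disk. The keys come from GeneralConfig, CA and SiteConfig; every concrete value (paths, the CA name "letsencrypt", domains, the e-mail address) is a made-up example, parsed the same way read_config does via toml::from_str.

#[cfg(test)]
mod config_examples {
    use crate::config::{GeneralConfig, SiteConfig};

    #[test]
    fn parse_example_configs() {
        // Main config: general paths plus one CA entry in the `ca` table.
        let general: GeneralConfig = toml::from_str(
            r#"
            accounts_path = "accounts"
            sites_path = "sites"
            certificates_path = "certificates"
            http_challenge_path = "/srv/http/.well-known/acme-challenge"

            [ca.letsencrypt]
            directory = "https://acme-v02.api.letsencrypt.org/directory"
            email_addresses = ["admin@example.org"]
            tos_accepted = true
            # eab_token and eab_key are only needed when the CA requires External Account Binding
            "#,
        )
        .unwrap();
        assert!(general.ca.contains_key("letsencrypt"));

        // Site config: one file per site below sites_path; the file stem becomes the site name.
        let site: SiteConfig = toml::from_str(
            r#"
            ca = "letsencrypt"
            domains = ["example.org", "www.example.org"]
            addresses = ["192.0.2.10"]
            reload_services = ["nginx.service"]
            algorithm = "ED25519"
            "#,
        )
        .unwrap();
        assert_eq!(site.ca, "letsencrypt");
    }
}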

33
src/consts.rs Normal file

@@ -0,0 +1,33 @@
use std::time::Duration;
use openssl::nid::Nid;
use tokio::fs::OpenOptions;
use lazy_static::lazy_static;
lazy_static! {
pub static ref FILE_MODE: OpenOptions = OpenOptions::new().create(false).read(true).write(false).truncate(false).to_owned();
pub static ref FILE_MODE_WRITE: OpenOptions = OpenOptions::new().create(true).write(true).truncate(true).to_owned();
pub static ref LETS_ENCRYPT: String = String::from("letsencrypt");
pub static ref LETS_ENCRYPT_STAGING: String = String::from("letsencrypt-staging");
}
pub const POOL_SIZE: usize = 1;
pub const MAX_WAIT_TIME: Duration = Duration::from_secs(1 * 60);
pub const WAIT_TIME: Duration = Duration::from_secs(5);
pub const ATTEMPTS: usize = MAX_WAIT_TIME.div_duration_f64(WAIT_TIME) as usize;
#[repr(u32)]
pub enum RsaStrength {
Weak = 1024,
Middle = 2048,
Strong = 4096,
}
pub const SECP_WEAK: Nid = Nid::SECP112R1;
pub const SECP_MIDDLE: Nid = Nid::SECP160R1;
pub const SECP_STRONG: Nid = Nid::SECP521R1;
pub const BRAINPOOL_WEAK: Nid = Nid::BRAINPOOL_P256R1;
pub const BRAINPOOL_MIDDLE: Nid = Nid::BRAINPOOL_P384R1;
pub const BRAINPOOL_STRONG: Nid = Nid::BRAINPOOL_P512R1;

36
src/macros.rs Normal file

@@ -0,0 +1,36 @@
use macro_rules_attribute::derive_alias;
#[allow(unused_macros)]
macro_rules! match_error {
($result:expr =>Err($errorname:ident)-> $errormessage:literal $(, $returntype:expr)?) => {
match $result {
Ok(ok) => ok,
Err($errorname) => {
::log::error!($errormessage);
return $($returntype)*;
},
}
};
}
#[allow(unused_macros)]
macro_rules! attr_function {
(
$visibility:vis $attr:ident $($items:ident).* => $type:ty
)=>{
$visibility fn $attr(&self) -> $type {
self$(.$items).*.$attr.clone()
}
}
}
derive_alias! {
#[derive(DefDer!)] = #[derive(Debug, Clone)];
#[derive(Hashable!)] = #[derive(Eq, Hash)];
}
#[allow(unused_imports)]
pub(crate) use {
attr_function,
match_error,
};
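
Editor's sketch (not part of the commit): roughly what a match_error! call expands to, since the arrow syntax is non-obvious. The surrounding function, the plain tokio::fs::File::open (standing in for the preconfigured FILE_MODE OpenOptions) and the Err(()) return value are hypothetical; the shape of the expansion follows the macro definition above.

// let file = match_error!(FILE_MODE.open(path).await =>Err(error)-> "Failed to open Private Key: {error}", Err(()));
// expands to approximately:
async fn open_or_log(path: std::path::PathBuf) -> Result<tokio::fs::File, ()> {
    let file = match tokio::fs::File::open(&path).await {
        // on success the value is unwrapped into `file`
        Ok(ok) => ok,
        Err(error) => {
            // on failure the message is logged and the enclosing function
            // returns the expression given after the message (here Err(()))
            ::log::error!("Failed to open Private Key: {error}");
            return Err(());
        },
    };
    Ok(file)
}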

291
src/main.rs Normal file

@@ -0,0 +1,291 @@
//! ACME client that supports multiple CAs and per-site configs that can be separate from the main config
#![allow(dead_code)]
#![allow(clippy::clone_on_copy)]
#![allow(clippy::identity_op)]
pub(crate) mod config;
pub(crate) mod consts;
pub(crate) mod macros;
pub(crate) mod process;
pub(crate) mod structs;
pub(crate) mod utils;
use crate::{
config::SiteConfig,
consts::*,
macros::match_error,
structs::{
Arguments,
ProcessorArgs,
},
utils::prefix_emails,
};
use acme2_eab::{
Account,
AccountBuilder,
Directory,
DirectoryBuilder,
};
use async_scoped::TokioScope;
use clap::Parser;
use config::CA;
use env_logger::init as log_init;
use libsystemd::daemon;
use log::*;
use openssl::{
self,
pkey::{
PKey,
Private,
},
};
use process::process_site;
use reqwest::{
Client,
tls::Version,
};
use std::{
collections::{
HashMap,
HashSet,
},
path::{
Path,
PathBuf,
},
str::FromStr,
sync::Arc,
time::Duration,
};
use tokio::{
fs::{
create_dir_all,
read_dir,
},
io::{
AsyncReadExt,
AsyncWriteExt,
},
sync::Mutex,
};
use tokio_stream::{
StreamExt,
wrappers::ReadDirStream,
};
use zbus_systemd::systemd1;
type SafeSet<T> = Mutex<HashSet<T>>;
fn default_client() -> reqwest::Client {
reqwest::Client::builder().min_tls_version(Version::TLS_1_2).https_only(true).pool_max_idle_per_host(POOL_SIZE).build().unwrap()
}
async fn load_privkey(path: PathBuf) -> Result<PKey<Private>, ()> {
let mut file = match_error!(FILE_MODE.open(path).await=>Err(error)-> "Failed to open Private Key: {error}", Err(()));
let mut data = String::new();
if let Err(error) = file.read_to_string(&mut data).await {
error!("Failed to read data for the key: {error}");
return Err(());
}
match PKey::private_key_from_pem(data.as_bytes()) {
Ok(key) => Ok(key),
Err(error) => {
error!("Failed to parse pem data: {error}");
Err(())
},
}
}
async fn process_accounts(
name: &String,
ca: &CA,
directories: &mut HashMap<String, Arc<Directory>>,
accounts: &mut HashMap<String, Arc<Account>>,
client: &Client,
accountpath: PathBuf,
) {
let directory = match directories.get(&ca.directory) {
Some(directory) => directory.to_owned(),
None => {
match DirectoryBuilder::new(ca.directory.clone()).http_client(client.clone()).build().await {
Ok(dir) => {
directories.insert(ca.directory.clone(), Arc::clone(&dir));
dir
},
Err(error) => {
error!("Failed to initialize directory for ca {name}: {error}");
return;
},
}
},
};
let mut ac = AccountBuilder::new(Arc::clone(&directory));
match ca.email_addresses.clone() {
Some(addr) => {
ac.contact(prefix_emails(addr));
},
None => {
ac.contact(Vec::new());
debug!("No Email address given")
},
}
let accountkey = accountpath.join("file.pem").with_file_name(name.clone());
let mut accountkeyfile = None;
if accountkey.exists() {
if let Ok(key) = load_privkey(accountkey).await {
ac.private_key(key);
}
} else {
info!("Registering for the CA {}", name.clone());
accountkeyfile = match FILE_MODE_WRITE.open(accountkey).await {
Ok(file) => Some(file),
Err(error) => {
error!("Failed to open the file for the accountkey: {error}");
return;
},
}
}
if let Some(meta) = &directory.meta {
// Collect all errors about the metadata up front instead of annoying the admin with errors at different stages
let mut errors = false;
if let Some(tos) = &meta.terms_of_service {
if !ca.tos_accepted {
error!("Terms of Services were not agreed into: {tos}");
errors = true;
} else {
ac.terms_of_service_agreed(true);
}
}
if meta.external_account_required.unwrap_or(false) {
if let Some(eab) = &ca.eab {
match eab.key() {
Ok(private) => {
trace!("EAB Key info: Type={:?} Bits={}, Security-Bits={}", private.id(), private.bits(), private.security_bits());
ac.external_account_binding(eab.token.clone(), private);
},
Err(error) => {
error!("{error}");
errors = true;
},
}
} else {
error!("eab_token and/or eab_key are unset, but the CA requires those.");
errors = true;
}
} else if ca.eab.is_some() {
warn!("The CA doesn't need EAB Tokens but they were configured")
}
if errors {
return;
}
}
let account = match ac.build().await {
Ok(account) => {
accounts.insert(name.clone(), Arc::clone(&account));
account
},
Err(error) => {
error!("Failed to get/create account: {error}");
return;
},
};
if let Some(mut keyfile) = accountkeyfile {
let keydata = match_error!(account.private_key().private_key_to_pem_pkcs8()=>Err(error)-> "Failed to convert the private key to PEM: {error}");
if let Err(error) = keyfile.write_all(keydata.as_slice()).await {
error!("Failed to write the accountkey: {error}");
}
}
}
async fn racme(flags: Arguments) {
let client = default_client();
let systemd_access = daemon::booted();
let mainconfig =
config::read_config::<config::GeneralConfig>(match_error!(FILE_MODE.open(flags.config).await=>Err(error)-> "error reading the config: {error}")).await;
trace!("Parsed Config: {mainconfig:?}");
let files = ReadDirStream::new(match_error!(read_dir(mainconfig.sites_path.clone()).await=>Err(error)-> "could not read files from sites dir: {error}"));
let mut siteconfigs = Vec::new();
for file in files.filter(Result::is_ok).map(|file| file.unwrap().path()).collect::<Vec<PathBuf>>().await {
let mut site = config::read_config::<SiteConfig>(FILE_MODE.open(file.clone()).await.unwrap()).await;
site.name = file.file_stem().unwrap().to_str().unwrap().to_string();
siteconfigs.push(site);
}
let used = siteconfigs.iter().map(|s| s.ca.clone()).collect::<HashSet<_>>();
debug!("Used CAs: {used:?}");
let mut directories = HashMap::<String, Arc<Directory>>::new();
let mut accounts = HashMap::new();
let accountpath = Path::new(&mainconfig.accounts_path).to_path_buf();
if let Err(error) = create_dir_all(accountpath.clone()).await {
error!("Failed to create the directory for the accounts: {error}");
return;
}
for (name, ca) in mainconfig.ca.iter().filter(|(name, _)| used.contains(name.to_owned())) {
process_accounts(name, ca, &mut directories, &mut accounts, &client, accountpath.clone()).await;
}
let restart_services = Mutex::new(HashSet::<String>::new());
let reload_services = Mutex::new(HashSet::<String>::new());
let certs = Path::new(&mainconfig.certificates_path).to_path_buf();
if !certs.exists() {
if let Err(error) = create_dir_all(certs.clone()).await {
error!("Failed to create directory for all the certificates: {error}");
return;
}
}
let challengepath = mainconfig.http_challenge_path.and_then(|path| PathBuf::from_str(path.as_str()).ok());
let dnsserver = None;
unsafe {
TokioScope::scope_and_collect(|scope| {
for site in siteconfigs {
if let Some(account) = accounts.get(&site.ca) {
scope.spawn(process_site(ProcessorArgs::new(
site,
Arc::clone(account),
&reload_services,
&restart_services,
certs.clone(),
challengepath.clone(),
dnsserver.clone(),
)));
} else {
error!("Could not process site {} because of previous errors", site.name)
}
}
})
}
.await;
if systemd_access {
let conn = match_error!(zbus_systemd::zbus::Connection::system().await=>Err(error)-> "Failed to connect with the systemd manager: {error}");
let systemd_manager = systemd1::ManagerProxy::new(&conn).await.unwrap();
let restart_services = restart_services.into_inner();
for service in reload_services.into_inner().difference(&restart_services.clone()) {
match systemd_manager.reload_unit(service.to_owned(), "replace".to_string()).await {
Ok(_) => info!("Reloaded {service}"),
Err(error) => error!("Failed to reload service {service}: {error}"),
};
}
for service in restart_services.iter() {
match systemd_manager.restart_unit(service.to_owned(), "replace".to_string()).await {
Ok(_) => info!("Restarted {service}"),
Err(error) => error!("Failed to restart service {service}: {error}"),
};
}
}
}
fn main() {
log_init();
let runtime = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap();
runtime.block_on(racme(Arguments::parse()));
runtime.shutdown_timeout(Duration::from_secs(1));
}

251
src/process.rs Normal file

@@ -0,0 +1,251 @@
use std::{
fs::Permissions,
os::unix::fs::PermissionsExt,
path::PathBuf,
};
use crate::{
config::{
Algorithm,
match_algo,
},
consts::{
ATTEMPTS,
FILE_MODE,
FILE_MODE_WRITE,
WAIT_TIME,
},
load_privkey,
match_error,
structs::{
ProcessorArgs,
San,
},
utils::{
gen_key,
is_matching,
},
};
use acme2_eab::{
Authorization,
ChallengeStatus,
Csr,
Identifier,
OrderBuilder,
OrderStatus,
};
use async_scoped::TokioScope;
use log::*;
use openssl::{
hash::MessageDigest,
stack::Stack,
x509::{
X509,
X509Extension,
X509Req,
X509v3Context,
extension::{
BasicConstraints,
ExtendedKeyUsage,
KeyUsage,
SubjectAlternativeName,
},
},
};
use tokio::{
fs::{
create_dir_all,
remove_file,
},
io::{
AsyncReadExt,
AsyncWriteExt,
},
};
fn gen_stack(args: &ProcessorArgs, context: X509v3Context) -> Stack<X509Extension> {
let mut stack = Stack::new().unwrap();
let _ = stack.push({
let mut subaltname = SubjectAlternativeName::new();
for san in args.san() {
match san {
San::Dns(domain) => subaltname.dns(&domain),
San::Email(email) => subaltname.email(&email),
San::IPAddress(ip) => subaltname.ip(ip.to_canonical().to_string().as_ref()),
};
}
subaltname.build(&context).unwrap()
});
stack.push(BasicConstraints::new().critical().build().unwrap()).unwrap();
stack.push(ExtendedKeyUsage::new().server_auth().critical().build().unwrap()).unwrap();
stack.push(KeyUsage::new().critical().digital_signature().key_agreement().key_encipherment().build().unwrap()).unwrap();
stack
}
pub async fn process_site(args: ProcessorArgs<'_>) {
let mut cert_renew = false;
info!("Processing Site {}", args.name());
let directory = args.certificate_dir().join(args.name().clone());
if !directory.exists() {
if let Err(error) = create_dir_all(directory.clone()).await {
error!("Failed to create directory for site {}: {}", args.name(), error);
return;
};
cert_renew = true;
}
let private_key_file = directory.join("privkey.pem");
let mut private_key;
let mut write_pkey = false;
if !private_key_file.exists() {
cert_renew = true;
write_pkey = true;
private_key = match_error!(gen_key(args.algorithm(), args.strength())=>Err(error)-> "Aborting processing of the site due to a problem with the key generation: {error}");
} else if let Ok(key) = load_privkey(private_key_file.clone()).await {
private_key = key;
if !match_algo(&private_key, args.algorithm(), args.strength()) {
info!("Algorithm for the private key has changed, updating the key");
cert_renew = true;
write_pkey = true;
private_key = match_error!(gen_key(args.algorithm(), args.strength())=>Err(error)-> "Aborting processing of the site due to a problem with the key generation: {error}");
}
} else {
error!("Failed to parse the private key. Renewing the private key.");
write_pkey = true;
cert_renew = true;
private_key = match_error!(gen_key(args.algorithm(), args.strength())=>Err(error)-> "Aborting processing of the site due to a problem with the key generation: {error}");
}
if write_pkey {
let pkey = private_key.private_key_to_pem_pkcs8().unwrap();
let mut file = match_error!(FILE_MODE_WRITE.open(private_key_file.clone()).await=>Err(error)-> "Failed to write new private key: {error}");
match_error!(file.write_all(&pkey).await=>Err(error)->"Failed to write new private key: {error}");
}
let pubkey_filename = directory.join("pubkey.pem");
if pubkey_filename.exists() {
let mut file = match_error!(FILE_MODE.open(pubkey_filename.clone()).await=>Err(error)-> "Failed to open publickey. Aborting processing: {error}");
let mut data = String::new();
if let Err(error) = file.read_to_string(&mut data).await {
cert_renew = true;
error!("Failed to read public key: {error}")
} else {
let pubkey = match X509::from_pem(data.as_bytes()) {
Ok(key) => key,
Err(_) => todo!(),
};
if !is_matching(pubkey, args.refresh_time(), args.san()) {
info!("Subject Alternative Names differ from Certifcate");
cert_renew = true;
};
}
} else {
cert_renew = true;
}
if !cert_renew {
info!("Site {} doesn't need an update for the certificate.", args.name());
return;
}
info!("Renewing Certificate for site {}", args.name());
let mut builder = OrderBuilder::new(args.account());
builder.set_identifiers(args.san().iter().map(|s| s.to_owned().into()).collect::<Vec<Identifier>>());
let mut order = match_error!(builder.build().await=>Err(error)-> "Failed to order the certificate: {error}");
let authorizations = match_error!(order.authorizations().await=>Err(error)-> "Failed to get the authorizations: {error}");
let (_, result) = tokio::join! {
unsafe {
TokioScope::scope_and_collect(|scope|{
for auth in authorizations {
scope.spawn(process_auth(auth, args.challenge_dir(), args.dnsserver()));
}
})
},
order.wait_ready(WAIT_TIME, ATTEMPTS),
};
order = match_error!(result=>Err(error)-> "Failed to process order: {error}");
if order.status == OrderStatus::Invalid {
error!("Failed the Order, check the logs for more information");
return;
}
let mut csr = X509Req::builder().unwrap();
if let Err(error) = csr.set_pubkey(&private_key) {
error!("failed to add the public key: {error}");
return;
}
let _ = csr.add_extensions(&gen_stack(&args, csr.x509v3_context(None)));
if let Err(error) = csr.sign(
&private_key,
match args.algorithm() {
Algorithm::Rsa => MessageDigest::sha3_512(),
_ => MessageDigest::null(),
},
) {
error!("Failed to sign Request: {error}");
return;
}
order = match_error!(order.finalize(Csr::Custom(csr.build())).await=>Err(error)-> "Failed to finalize the order: {error}");
order = match_error!(order.wait_done(WAIT_TIME, ATTEMPTS).await=>Err(error)-> "Failed to finalize the order: {error}");
if order.status != OrderStatus::Valid {
error!("Failed to complete the order: check the logs for more information");
return;
}
let certs = order.certificate().await.unwrap().unwrap();
debug!("Received {} certificates.", certs.len());
let mut pubkey_file = match_error!(FILE_MODE_WRITE.open(pubkey_filename).await=>Err(error)-> "Failed to open the file for the publickey: {error}");
match_error!(pubkey_file.write_all(&certs[0].to_pem().unwrap()).await=>Err(error)-> "Failed to write the publickey: {error}");
let mut fullchain = match_error!(FILE_MODE_WRITE.open(directory.join("fullchain.pem")).await=>Err(error)-> "failed to open the fullchain.pem: {error}");
for cert in certs.clone() {
let _ = fullchain.write_all(&cert.to_pem().unwrap()).await;
}
let mut bundle = match_error!(FILE_MODE_WRITE.open(directory.join("bundle.pem")).await=>Err(error)-> "failed to open the bundle.pem: {error}");
let _ = bundle.write_all(&private_key.private_key_to_pem_pkcs8().unwrap()).await;
let _ = bundle.write_all(&certs[0].to_pem().unwrap()).await;
info!("Processing of {} successful", args.name());
let mut services = args.reload_list().await;
for service in &args.reload_services() {
services.insert(service.to_owned());
}
let mut services = args.restart_list().await;
for service in &args.restart_services() {
services.insert(service.to_owned());
}
}
pub async fn process_auth(auth: Authorization, challenge_dir: Option<PathBuf>, dnsserver: Option<()>) {
if let Some(_dnschallenge) = auth.get_challenge("dns-01") {
if let Some(_dnsserver) = dnsserver {
} else {
debug!("DNS-01 is disabled")
}
}
if !auth.wildcard.unwrap_or(false) {
if let Some(mut challenge) = auth.get_challenge("http-01") {
trace!("CA has an http-challenge");
if let Some(directory) = challenge_dir {
match_error!(create_dir_all(directory.clone()).await=>Err(error)-> "Failed to ensure the directory exists: {error}");
let filename = directory.join(challenge.token.clone().unwrap());
let mut challengefile =
match_error!(FILE_MODE_WRITE.open(filename.clone()).await=>Err(error)-> "Failed to open the file for the http-challenge: {error}");
match_error!(challengefile.set_permissions(Permissions::from_mode(0o644)).await=>Err(error)-> "Failed to give the file the necessary permissions: {error}");
match_error!(
challengefile.write_all(challenge.key_authorization().unwrap().unwrap().as_bytes()).await=>Err(error)->
"Failed to write the challenge to the file: {error}"
);
drop(challengefile);
challenge = match_error!(challenge.validate().await=>Err(error)-> "failed to inform the server about the deployed challenge: {error}");
challenge = match_error!(challenge.wait_done(WAIT_TIME, ATTEMPTS).await=>Err(error)-> "Server took too long for the validation: {error}");
if challenge.status == ChallengeStatus::Invalid {
error!("Failed the challenge: {}", challenge.error.unwrap())
} else {
info!("Challenge for {} successful", auth.identifier.value)
}
if let Err(error) = remove_file(filename).await {
error!("Failed to remove the challenge after it has been deployed: {error}");
}
return;
} else {
debug!("No Challengedir given")
}
}
}
error!("Cannot prove the challenges: {}", auth.challenges.iter().map(|c| c.r#type.clone()).collect::<Vec<_>>().join(", "))
}
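
Editor's sketch (not part of the commit): the per-site files that process_site writes, collected from the writes above. certificates_path and the site name come from the configs; the helper function itself is hypothetical.

fn site_output_files(certificates_path: &std::path::Path, site_name: &str) -> Vec<std::path::PathBuf> {
    // <certificates_path>/<site name>/ is created by process_site if it is missing
    let dir = certificates_path.join(site_name);
    vec![
        dir.join("privkey.pem"),   // private key, PKCS#8 PEM
        dir.join("pubkey.pem"),    // leaf certificate only
        dir.join("fullchain.pem"), // leaf plus the rest of the returned chain
        dir.join("bundle.pem"),    // private key followed by the leaf certificate
    ]
}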

199
src/structs.rs Normal file

@@ -0,0 +1,199 @@
use std::{
collections::HashSet,
fmt::Display,
net::IpAddr,
path::PathBuf,
sync::Arc,
};
use acme2_eab::{
Account,
Identifier,
};
use clap::Parser;
use macro_rules_attribute::macro_rules_derive;
use openssl::x509::GeneralName;
use tokio::sync::MutexGuard;
use crate::{
SafeSet,
config::{
Algorithm,
SiteConfig,
Strength,
},
macros::{
DefDer,
Hashable,
attr_function,
},
};
#[macro_rules_derive(DefDer)]
#[derive(Parser)]
pub struct Arguments {
pub config: String,
}
#[macro_rules_derive(DefDer)]
pub struct ProcessorArgs<'a> {
site: SiteConfig,
account: Arc<Account>,
reload_services: &'a SafeSet<String>,
restart_services: &'a SafeSet<String>,
certificate_dir: PathBuf,
refresh_time: u32,
challenge_dir: Option<PathBuf>,
dnsserver: Option<()>,
}
impl<'a: 'b, 'b> ProcessorArgs<'a> {
attr_function!(pub name site => String);
attr_function!(pub algorithm site => Algorithm);
attr_function!(pub strength site => Strength);
attr_function!(pub domains site => Vec<String>);
attr_function!(pub addresses site => Vec<IpAddr>);
attr_function!(pub emails site => Vec<String>);
attr_function!(pub restart_services site => Vec<String>);
attr_function!(pub reload_services site => Vec<String>);
attr_function!(pub certificate_dir => PathBuf);
attr_function!(pub refresh_time => u32);
attr_function!(pub challenge_dir => Option<PathBuf>);
attr_function!(pub dnsserver => Option<()>);
pub fn new(
site: SiteConfig,
account: Arc<Account>,
reload_services: &'a SafeSet<String>,
restart_services: &'a SafeSet<String>,
certificate_dir: PathBuf,
http_challenge_dir: Option<PathBuf>,
dnsserver: Option<()>,
) -> Self {
ProcessorArgs {
site,
account,
reload_services,
restart_services,
certificate_dir,
refresh_time: 7,
challenge_dir: http_challenge_dir,
dnsserver,
}
}
pub fn account(&self) -> Arc<Account> {
Arc::clone(&self.account)
}
pub async fn restart_list(&self) -> MutexGuard<'b, HashSet<String>> {
self.restart_services.lock().await
}
pub async fn reload_list(&self) -> MutexGuard<'b, HashSet<String>> {
self.reload_services.lock().await
}
pub fn san(&self) -> Vec<San> {
let mut sans = Vec::with_capacity(self.site.domains.len() + self.site.emails.len() + self.site.addresses.len());
for domain in self.domains() {
sans.push(San::Dns(domain));
}
for address in self.addresses() {
sans.push(San::IPAddress(address));
}
for email in self.emails() {
sans.push(San::Email(email));
}
sans
}
}
#[macro_rules_derive(DefDer, Hashable)]
#[derive(PartialEq)]
pub enum San {
Dns(String),
Email(String),
IPAddress(IpAddr),
}
impl From<GeneralName> for San {
fn from(value: GeneralName) -> Self {
if let Some(dns) = value.dnsname() {
return Self::Dns(dns.to_owned());
}
if let Some(ipaddr) = value.ipaddress() {
if ipaddr.len() == 4 {
let mut addr = [0u8; 4];
addr.copy_from_slice(ipaddr);
return Self::IPAddress(IpAddr::from(addr));
} else {
let mut addr = [0u8; 16];
addr.copy_from_slice(ipaddr);
return Self::IPAddress(IpAddr::from(addr));
}
}
if let Some(email) = value.email() {
return Self::Email(email.to_owned());
}
unreachable!();
}
}
impl From<San> for Identifier {
fn from(value: San) -> Self {
match value {
San::Dns(domain) => {
Identifier {
r#type: "dns".into(),
value: domain,
}
},
San::Email(email) => {
Identifier {
r#type: "email".into(),
value: email,
}
},
San::IPAddress(ip) => {
Identifier {
r#type: "ip".into(),
value: ip.to_string(),
}
},
}
}
}
#[macro_rules_derive(DefDer)]
pub struct Error(String);
impl Error {
#[inline]
pub fn err<T>(message: String) -> Result<T, Self> {
Err(Self::new(message))
}
#[inline]
pub fn new(message: String) -> Self {
Self(message)
}
}
impl Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.0)
}
}

122
src/utils.rs Normal file

@@ -0,0 +1,122 @@
use std::collections::HashSet;
use crate::{
config::{
Algorithm,
Strength,
},
consts::{
BRAINPOOL_MIDDLE,
BRAINPOOL_STRONG,
BRAINPOOL_WEAK,
SECP_MIDDLE,
SECP_STRONG,
SECP_WEAK,
},
structs::San,
};
use log::*;
use openssl::{
asn1::Asn1Time,
ec::EcKey,
error::ErrorStack,
nid::Nid,
pkey::{
PKey,
Private,
},
rsa::Rsa,
x509::{
X509,
X509Name,
X509NameBuilder,
},
};
pub fn prefix_emails(input: Vec<String>) -> Vec<String> {
let mut output = Vec::with_capacity(input.len());
for mut addr in input {
if addr.starts_with("mailto:") {
output.push(addr);
} else {
addr.insert_str(0, "mailto:");
output.push(addr);
}
}
output
}
fn gen_ec_key(algorithm: Algorithm, strength: Strength) -> Result<PKey<Private>, ErrorStack> {
let (weak, middle, strong) = match algorithm {
Algorithm::Rsa | Algorithm::ED25519 => unreachable!(),
Algorithm::Secp => (SECP_WEAK, SECP_MIDDLE, SECP_STRONG),
Algorithm::Brainpool => (BRAINPOOL_WEAK, BRAINPOOL_MIDDLE, BRAINPOOL_STRONG),
};
let algo = EcKey::from_curve_name(match strength {
Strength::Weak => weak,
Strength::Middle => middle,
Strength::Strong => strong,
});
let key = match algo {
Err(error) => {
error!("Failed to generate key due of an problem with the algorithms: {error}");
return Err(error);
},
Ok(algo) => EcKey::generate(algo.group()),
};
match key {
Ok(private) => PKey::from_ec_key(private),
Err(error) => {
error!("Failed to generate Private key from EcKey: {error}");
Err(error)
},
}
}
pub fn gen_key(algorithm: Algorithm, strength: Strength) -> Result<PKey<Private>, String> {
let key = match algorithm {
Algorithm::Rsa => {
let key = Rsa::generate(strength.rsabits());
match key {
Ok(key) => PKey::from_rsa(key),
Err(error) => Err(error),
}
},
Algorithm::Secp => gen_ec_key(Algorithm::Secp, strength),
Algorithm::ED25519 => PKey::generate_ed25519(),
Algorithm::Brainpool => gen_ec_key(Algorithm::Brainpool, strength),
};
match key {
Ok(key) => Ok(key),
Err(error) => Err(format!("Failed to generate an key to the parameters: {error}")),
}
}
pub fn is_matching(cert: X509, daydiff: u32, sans: Vec<San>) -> bool {
let now = Asn1Time::days_from_now(daydiff).unwrap();
if cert.not_after().compare(&now).is_ok_and(|order| order.is_le()) {
return false;
}
let alt_names = match cert.subject_alt_names() {
None => return false,
Some(x) => x,
};
let mut cert_san = HashSet::<San>::new();
for san in alt_names {
cert_san.insert(san.into());
}
let mut config_san = HashSet::with_capacity(sans.len());
for san in sans {
config_san.insert(san);
}
config_san.difference(&cert_san).count() == 0
}
pub fn string_to_cn(name: String) -> X509Name {
let mut builder = X509NameBuilder::new().unwrap();
builder.append_entry_by_nid(Nid::COMMONNAME, &name).unwrap();
builder.build()
}
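
Editor's sketch (not part of the commit): how gen_key and match_algo (from config.rs) fit together. A freshly generated key should satisfy match_algo for the parameters it was generated with; process_site relies on this to decide whether the private key has to be regenerated. The test itself is hypothetical.

#[cfg(test)]
mod key_roundtrip {
    use crate::{
        config::{Algorithm, Strength, match_algo},
        utils::gen_key,
    };

    #[test]
    fn generated_key_matches_requested_parameters() {
        // (Secp, Strong) generates a key on SECP521R1 ...
        let key = gen_key(Algorithm::Secp, Strength::Strong).unwrap();
        // ... which match_algo recognises, so process_site keeps the key ...
        assert!(match_algo(&key, Algorithm::Secp, Strength::Strong));
        // ... while a changed algorithm in the site config forces a new key.
        assert!(!match_algo(&key, Algorithm::Brainpool, Strength::Strong));
    }
}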