feat: build a generic database upgrade mechanism (#299)

* feat: build a generic database upgrade mechanism

* fix: PR review + add migration doc

* fix: resolve clippy lints about casts (loss or truncation) and missing `# Errors` sections in docs
Manuthor 2024-09-09 11:12:44 +02:00 committed by GitHub
parent 405f6ef9a2
commit a08eb207f3
117 changed files with 2641 additions and 1491 deletions
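
The headline change is the generic database upgrade mechanism. The server crate also gains a `version-compare` dependency (see the Cargo.toml and Cargo.lock diffs below), which suggests migrations are gated on comparing a version recorded in the database against the version of the running server. The sketch below is purely illustrative: the function and variable names (`needs_upgrade`, `db_state_version`, `software_version`) are hypothetical and this is not code from the PR.

```rust
// Illustrative sketch only: a hypothetical version-gated upgrade check.
// Assumes `version-compare = "0.2"` in Cargo.toml, matching the dependency added by this PR.
use version_compare::{compare_to, Cmp};

/// Returns true when the version recorded in the database is strictly older
/// than the version of the running server, i.e. migrations should run.
fn needs_upgrade(db_state_version: &str, software_version: &str) -> bool {
    compare_to(db_state_version, software_version, Cmp::Lt).unwrap_or(false)
}

fn main() {
    // A 4.17.0 database opened by a 4.18.0 server would trigger an upgrade.
    assert!(needs_upgrade("4.17.0", "4.18.0"));
    // An up-to-date database is left untouched.
    assert!(!needs_upgrade("4.18.0", "4.18.0"));
}
```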


@ -108,9 +108,7 @@ jobs:
MYSQL_ROOT_PASSWORD: kms
KMS_MYSQL_URL: mysql://root:kms@mariadb/kms
KMS_ENCLAVE_DIR_PATH: data/public
KMS_SQLITE_PATH: data/shared
KMS_CERTBOT_SSL_PATH: data/private
REDIS_HOST: redis

1
.gitignore vendored

@ -1,5 +1,6 @@
target/
.cargo_check/
.history/
*nix
*.swp
TODO.md


@ -2,6 +2,40 @@
All notable changes to this project will be documented in this file.
## [4.18.0] - 2024-09-XX
### 🚀 Features
- Add ReKey KMIP operation ([#294](https://github.com/Cosmian/kms/pull/294))
- Add API token authentication between server and clients ([#290](https://github.com/Cosmian/kms/pull/290))
- Build a generic database upgrade mechanism ([#299](https://github.com/Cosmian/kms/pull/299))
### 🐛 Bug Fixes
- KMIP Attributes:
* In get_attributes, use attributes from ObjectWithMetadata instead of Object.Attributes ([#278](https://github.com/Cosmian/kms/pull/278))
* When inserting in db, force Object::Attributes to be synced with Attributes ([#279](https://github.com/Cosmian/kms/pull/279))
- Certificates handling/tasks:
* **Validate** KMIP operation:
- Simplify getting CRLs and get returned errors ([#268](https://github.com/Cosmian/kms/pull/268))
- Validate certificate generation ([#283](https://github.com/Cosmian/kms/pull/283))
- Use certificate file path in ckms arguments ([#292](https://github.com/Cosmian/kms/pull/292))
* **Certify** KMIP operation: Server must sign x509 after adding X509 extensions ([#282](https://github.com/Cosmian/kms/pull/282))
- Merge decrypt match in same function ([#295](https://github.com/Cosmian/kms/pull/295))
- Fix Public RSA Key size in get attributes ([#275](https://github.com/Cosmian/kms/pull/275))
- RUSTSEC:
* **RUSTSEC-2024-0357**: MemBio::get_buf has undefined behavior with empty buffers: upgrade crate `openssl` from 1.0.64 to 1.0.66 ([#280](https://github.com/Cosmian/kms/pull/280))
* **RUSTSEC-2024-0363**: Binary Protocol Misinterpretation caused by Truncating or Overflowing Casts: bump sqlx to 0.8.1 ([#291](https://github.com/Cosmian/kms/pull/291) and [#297](https://github.com/Cosmian/kms/pull/297))
### ⚙️ Miscellaneous Tasks
- **clippy** tasks:
* Only expose pub functions that need to be public ([#277](https://github.com/Cosmian/kms/pull/277))
* Hardcode clippy lints ([#293](https://github.com/Cosmian/kms/pull/293))
- Rename macOS artifacts to include the CPU architecture
- Configure `ckms` to build reqwest with minimal idle connections reuse ([#272](https://github.com/Cosmian/kms/pull/272))
- Do not delete tags if none are provided ([#276](https://github.com/Cosmian/kms/pull/276))
## [4.17.0] - 2024-07-05
### 🚀 Features

8
Cargo.lock generated

@ -1404,6 +1404,7 @@ dependencies = [
"serde",
"serde_json",
"sqlx",
"tempfile",
"thiserror",
"time",
"tokio",
@ -1413,6 +1414,7 @@ dependencies = [
"tracing-subscriber",
"url",
"uuid",
"version-compare",
"x509-parser",
"zeroize",
]
@ -4992,6 +4994,12 @@ version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
[[package]]
name = "version-compare"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "852e951cb7832cb45cb1169900d19760cfa39b82bc0ea9c0e5a14ae88411c98b"
[[package]]
name = "version_check"
version = "0.9.5"


@ -21,6 +21,15 @@ pub enum AccessAction {
}
impl AccessAction {
/// Processes the access action.
///
/// # Arguments
///
/// * `kms_rest_client` - The KMS client used for the action.
///
/// # Errors
///
/// Returns an error if there was a problem running the action.
pub async fn process(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
match self {
Self::Grant(action) => action.run(kms_rest_client).await?,
@ -58,6 +67,16 @@ pub struct GrantAccess {
}
impl GrantAccess {
/// Runs the `GrantAccess` action.
///
/// # Arguments
///
/// * `kms_rest_client` - A reference to the KMS client used to communicate with the KMS server.
///
/// # Errors
///
/// Returns an error if the query execution on the KMS server fails.
///
pub async fn run(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
let access = Access {
unique_identifier: Some(UniqueIdentifier::TextString(self.object_uid.clone())),
@ -104,6 +123,16 @@ pub struct RevokeAccess {
}
impl RevokeAccess {
/// Runs the `RevokeAccess` action.
///
/// # Arguments
///
/// * `kms_rest_client` - A reference to the KMS client used to communicate with the KMS server.
///
/// # Errors
///
/// Returns an error if the query execution on the KMS server fails.
///
pub async fn run(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
let access = Access {
unique_identifier: Some(UniqueIdentifier::TextString(self.object_uid.clone())),
@ -138,6 +167,16 @@ pub struct ListAccessesGranted {
}
impl ListAccessesGranted {
/// Runs the `ListAccessesGranted` action.
///
/// # Arguments
///
/// * `kms_rest_client` - A reference to the KMS client used to communicate with the KMS server.
///
/// # Errors
///
/// Returns an error if the query execution on the KMS server fails.
///
pub async fn run(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
let accesses = kms_rest_client
.list_access(&self.object_uid)
@ -164,6 +203,16 @@ impl ListAccessesGranted {
pub struct ListOwnedObjects;
impl ListOwnedObjects {
/// Runs the `ListOwnedObjects` action.
///
/// # Arguments
///
/// * `kms_rest_client` - A reference to the KMS client used to communicate with the KMS server.
///
/// # Errors
///
/// Returns an error if the query execution on the KMS server fails.
///
pub async fn run(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
let objects = kms_rest_client
.list_owned_objects()
@ -190,6 +239,16 @@ impl ListOwnedObjects {
pub struct ListAccessRightsObtained;
impl ListAccessRightsObtained {
/// Runs the `ListAccessRightsObtained` action.
///
/// # Arguments
///
/// * `kms_rest_client` - A reference to the KMS client used to communicate with the KMS server.
///
/// # Errors
///
/// Returns an error if the query execution on the KMS server fails.
///
pub async fn run(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
let objects = kms_rest_client
.list_access_rights_obtained()


@ -36,6 +36,16 @@ pub enum CertificatesCommands {
}
impl CertificatesCommands {
/// Process the `Certificates` main commands.
///
/// # Arguments
///
/// * `kms_rest_client` - A reference to the KMS client used to communicate with the KMS server.
///
/// # Errors
///
/// Returns an error if the query execution on the KMS server fails.
///
pub async fn process(&self, client_connector: &KmsClient) -> CliResult<()> {
match self {
Self::Certify(action) => action.run(client_connector).await,


@ -90,53 +90,78 @@ impl Stdout {
self.object_owned = Some(object_owned);
}
/// Writes the output to the console.
///
/// # Errors
///
/// Returns an error if there is an issue with writing to the console.
pub fn write(&self) -> CliResult<()> {
// Check if the output format should be JSON
let json_format_from_env = std::env::var(KMS_CLI_FORMAT)
.unwrap_or_else(|_| CLI_DEFAULT_FORMAT.to_string())
.to_lowercase()
== CLI_JSON_FORMAT;
if json_format_from_env {
// Serialize the output as JSON and print it
let console_stdout = serde_json::to_string_pretty(&self)?;
println!("{console_stdout}");
} else {
// Print the output in text format
if !self.stdout.is_empty() {
println!("{}", self.stdout);
}
// Print the unique identifier if present
if let Some(id) = &self.unique_identifier {
println!("\t Unique identifier: {id}");
}
// Print the list of unique identifiers if present
if let Some(ids) = &self.unique_identifiers {
for id in ids {
println!("{id}");
}
}
// Print the public key unique identifier if present
if let Some(id) = &self.public_key_unique_identifier {
println!("\t Public key unique identifier: {id}");
}
// Print the private key unique identifier if present
if let Some(id) = &self.private_key_unique_identifier {
println!("\t Private key unique identifier: {id}");
}
// Print the attributes if present
if let Some(attributes) = &self.attributes {
let json = serde_json::to_string_pretty(attributes)?;
println!("{json}");
}
// Print the list of accesses if present
if let Some(accesses) = &self.accesses {
for access in accesses {
println!(" - {}: {:?}", access.user_id, access.operations);
}
}
// Print the list of access rights obtained if present
if let Some(access_rights_obtained) = &self.access_rights_obtained {
for access in access_rights_obtained {
println!("{access}");
}
}
// Print the list of objects owned if present
if let Some(object_owned) = &self.object_owned {
for obj in object_owned {
println!("{obj}");
}
}
// Print the list of tags if present
if let Some(t) = &self.tags {
if !t.is_empty() {
println!("\n Tags:");


@ -107,13 +107,13 @@ impl DecryptAction {
&metadata_and_cleartext.plaintext,
&self.input_files,
self.output_file.as_ref(),
)?
)?;
} else {
write_single_decrypted_data(
&metadata_and_cleartext.plaintext,
&self.input_files[0],
self.output_file.as_ref(),
)?
)?;
}
Ok(())
}


@ -26,7 +26,7 @@ pub struct EncryptAction {
input_files: Vec<PathBuf>,
/// The encryption policy to encrypt the file with
/// Example: "department::marketing && level::confidential"`
/// Example: "`department::marketing` && `level::confidential`"
#[clap(required = true)]
encryption_policy: String,
@ -105,9 +105,9 @@ impl EncryptAction {
// Write the encrypted data
if cryptographic_algorithm == CryptographicAlgorithm::CoverCryptBulk {
write_bulk_encrypted_data(&data, &self.input_files, self.output_file.as_ref())?
write_bulk_encrypted_data(&data, &self.input_files, self.output_file.as_ref())?;
} else {
write_single_encrypted_data(&data, &self.input_files[0], self.output_file.as_ref())?
write_single_encrypted_data(&data, &self.input_files[0], self.output_file.as_ref())?;
}
Ok(())
}


@ -54,7 +54,7 @@ pub struct CreateUserKeyAction {
/// The access policy as a boolean expression combining policy attributes.
///
/// Example: "(Department::HR || Department::MKG) && Security Level::Confidential"
/// Example: "(`Department::HR` || `Department::MKG`) && Security `Level::Confidential`"
#[clap(required = true)]
access_policy: String,


@ -25,6 +25,16 @@ pub enum CovercryptCommands {
}
impl CovercryptCommands {
/// Process the Covercrypt command and execute the corresponding action.
///
/// # Arguments
///
/// * `kms_rest_client` - The KMS client used for communication with the KMS service.
///
/// # Errors
///
/// This function can return an error if any of the underlying actions encounter an error.
///
pub async fn process(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
match self {
Self::Policy(command) => command.process(kms_rest_client).await?,


@ -64,7 +64,7 @@ impl PolicyCommands {
Self::View(action) => action.run(kms_rest_client).await?,
Self::Specs(action) => action.run(kms_rest_client).await?,
Self::Binary(action) => action.run(kms_rest_client).await?,
Self::Create(action) => action.run().await?,
Self::Create(action) => action.run()?,
Self::AddAttribute(action) => action.run(kms_rest_client).await?,
Self::RemoveAttribute(action) => action.run(kms_rest_client).await?,
Self::DisableAttribute(action) => action.run(kms_rest_client).await?,
@ -125,7 +125,7 @@ pub struct CreateAction {
}
impl CreateAction {
pub async fn run(&self) -> CliResult<()> {
pub fn run(&self) -> CliResult<()> {
// Parse the json policy file
let policy = policy_from_json_file(&self.policy_specifications_file)?;
@ -524,6 +524,7 @@ impl RemoveAttributeAction {
}
#[cfg(test)]
#[allow(clippy::items_after_statements)]
mod tests {
use std::path::PathBuf;


@ -24,6 +24,16 @@ pub enum EllipticCurveCommands {
}
impl EllipticCurveCommands {
/// Runs the `EllipticCurveCommands` main commands.
///
/// # Arguments
///
/// * `kms_rest_client` - A reference to the KMS client used to communicate with the KMS server.
///
/// # Errors
///
/// Returns an error if the query execution on the KMS server fails.
///
pub async fn process(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
match self {
Self::Keys(command) => command.process(kms_rest_client).await?,


@ -20,6 +20,16 @@ pub enum GoogleCommands {
}
impl GoogleCommands {
/// Process the Google command by delegating the execution to the appropriate subcommand.
///
/// # Arguments
///
/// * `conf_path` - The path to the configuration file.
///
/// # Errors
///
/// Returns a `CliResult` indicating the success or failure of the command.
///
pub async fn process(&self, conf_path: &PathBuf) -> CliResult<()> {
match self {
Self::Keypairs(command) => command.process(conf_path).await?,


@ -48,6 +48,29 @@ use crate::{
pub struct LoginAction;
impl LoginAction {
/// This function processes the login action.
/// It loads the client configuration from the specified path, retrieves the `OAuth2` configuration,
/// initializes the login state, prompts the user to browse to the authorization URL,
/// finalizes the login process by receiving the authorization code and exchanging it for an access token,
/// updates the configuration with the access token, and saves the configuration to the specified path.
///
/// # Arguments
///
/// * `conf_path` - The path to the client configuration file.
///
/// # Errors
///
/// This function can return a `CliError` in the following cases:
///
/// * The `login` command requires an Identity Provider (`IdP`) that must be configured in the `oauth2_conf` object in the client configuration file.
/// * The client configuration file cannot be loaded.
/// * The `OAuth2` configuration is missing or invalid in the client configuration file.
/// * The authorization URL cannot be parsed.
/// * The authorization code is not received or does not match the CSRF token.
/// * The access token cannot be requested from the Identity Provider.
/// * The token exchange request fails.
/// * The token exchange response cannot be parsed.
/// * The client configuration cannot be updated or saved.
pub async fn process(&self, conf_path: &PathBuf) -> CliResult<()> {
let mut conf = ClientConf::load(conf_path)?;
let oauth2_conf = conf.oauth2_conf.as_ref().ok_or_else(|| {
@ -189,6 +212,18 @@ impl LoginState {
/// This function should be called immediately after the user has been instructed to browse to the authorization URL.
/// It starts a server on localhost:17899 and waits for the authorization code to be received
/// from the browser window. Once the code is received, the server is closed and the code is returned.
///
/// # Errors
///
/// This function can return a `CliError` in the following cases:
///
/// * The authorization code, state, or other parameters are not received from the redirect URL.
/// * The received state does not match the CSRF token.
/// * The authorization code is not received on authentication.
/// * The code received on authentication does not match the CSRF token.
/// * The access token cannot be requested from the Identity Provider.
/// * The token exchange request fails.
/// * The token exchange response cannot be parsed.
pub async fn finalize(&self) -> CliResult<String> {
// recover the authorization code, state and other parameters from the redirect URL
let auth_parameters = Self::receive_authorization_parameters()?;
@ -294,6 +329,20 @@ pub struct OAuthResponse {
/// not in the `access_token` field.
///
/// For Google see: <https://developers.google.com/identity/openid-connect/openid-connect#obtainuserinfo>
///
/// # Arguments
///
/// * `login_config` - The `Oauth2LoginConfig` containing the client configuration.
/// * `redirect_url` - The redirect URL used in the `OAuth2` flow.
/// * `pkce_verifier` - The PKCE code verifier used in the `OAuth2` flow.
/// * `authorization_code` - The authorization code received from the Identity Provider.
///
/// # Errors
///
/// This function can return a `CliError` in the following cases:
///
/// * The token exchange request fails.
/// * The token exchange response cannot be parsed.
pub async fn request_token(
login_config: &Oauth2LoginConfig,
redirect_url: &Url,


@ -13,6 +13,16 @@ use crate::error::result::CliResult;
pub struct LogoutAction;
impl LogoutAction {
/// Process the logout action.
///
/// # Arguments
///
/// * `conf_path` - The path to the ckms configuration file.
///
/// # Errors
///
/// Returns an error if there is an issue loading or saving the configuration file.
///
pub fn process(&self, conf_path: &PathBuf) -> CliResult<()> {
let mut conf = ClientConf::load(conf_path)?;
conf.kms_access_token = None;


@ -13,6 +13,11 @@ pub struct MarkdownAction {
}
impl MarkdownAction {
/// Process the given command and generate the markdown documentation.
///
/// # Errors
///
/// Returns an error if there is an issue creating or writing to the markdown file.
pub fn process(&self, cmd: &Command) -> CliResult<()> {
let mut output = String::new();
writeln!(


@ -19,6 +19,16 @@ use crate::error::result::{CliResult, CliResultHelper};
pub struct NewDatabaseAction;
impl NewDatabaseAction {
/// Process the `NewDatabaseAction` by querying the KMS to get a new database.
///
/// # Arguments
///
/// * `kms_rest_client` - The KMS client used to communicate with the KMS server.
///
/// # Errors
///
/// Returns an error if the query execution on the KMS server fails.
///
pub async fn process(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
// Query the KMS to get a new database
let token = kms_rest_client


@ -24,6 +24,15 @@ pub enum RsaCommands {
}
impl RsaCommands {
/// Process the RSA command by executing the corresponding action.
///
/// # Arguments
///
/// * `kms_rest_client` - A reference to the KMS client used for communication with the KMS service.
///
/// # Errors
///
/// Returns an error if there is an issue executing the command.
pub async fn process(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
match self {
Self::Keys(command) => command.process(kms_rest_client).await?,


@ -108,6 +108,14 @@ pub struct ExportKeyAction {
impl ExportKeyAction {
/// Export a key from the KMS
///
/// # Errors
///
/// This function can return an error if:
///
/// - Either `--key-id` or one or more `--tag` is not specified.
/// - There is a server error while exporting the object.
///
pub async fn run(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
let id = if let Some(key_id) = &self.key_id {
key_id.clone()


@ -78,6 +78,21 @@ pub struct GetAttributesAction {
}
impl GetAttributesAction {
/// Get the KMIP object attributes and tags.
///
/// When using tags to retrieve the object, rather than the object id,
/// an error is returned if multiple objects matching the tags are found.
///
/// # Errors
///
/// This function can return an error if:
///
/// - The `--id` or one or more `--tag` options is not specified.
/// - There is an error serializing the tags to a string.
/// - There is an error performing the Get Attributes request.
/// - There is an error serializing the attributes to JSON.
/// - There is an error writing the attributes to the output file.
/// - There is an error writing to the console.
pub async fn process(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
trace!("GetAttributesAction: {:?}", self);
let id = if let Some(key_id) = &self.id {


@ -111,6 +111,23 @@ pub struct ImportKeyAction {
}
impl ImportKeyAction {
/// Run the import key action.
///
/// # Errors
///
/// This function can return a [`CliError`] if an error occurs during the import process.
///
/// Possible error cases include:
///
/// - Failed to read the key file.
/// - Failed to parse the key file in the specified format.
/// - Invalid key format specified.
/// - Failed to assign cryptographic usage mask.
/// - Failed to generate import attributes.
/// - Failed to import the key.
/// - Failed to write the response to stdout.
///
/// [`CliError`]: ../error/result/enum.CliError.html
pub async fn run(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
let cryptographic_usage_mask = self
.key_usage


@ -94,6 +94,10 @@ pub struct LocateObjectsAction {
impl LocateObjectsAction {
/// Export a key from the KMS
///
/// # Errors
///
/// Returns an error if there is a problem communicating with the KMS or if the requested key cannot be located.
pub async fn process(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
let mut attributes = Attributes::default();


@ -68,6 +68,18 @@ pub struct UnwrapKeyAction {
impl UnwrapKeyAction {
/// Export a key from the KMS
///
/// # Errors
///
/// This function can return an error if:
///
/// - The key file cannot be read.
/// - The unwrap key fails to decode from base64.
/// - The unwrapping key fails to be created.
/// - The unwrapping key fails to unwrap the key.
/// - The output file fails to be written.
/// - The console output fails to be written.
///
pub async fn run(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
// read the key file
let mut object = read_object_from_json_ttlv_file(&self.key_file_in)?;


@ -62,6 +62,21 @@ pub struct WrapKeyAction {
}
impl WrapKeyAction {
/// Run the wrap key action.
///
/// # Errors
///
/// This function can return an error if:
///
/// - The key file cannot be read.
/// - The key is already wrapped and cannot be wrapped again.
/// - The wrap key cannot be decoded from base64.
/// - The wrap password cannot be derived into a symmetric key.
/// - The wrap key cannot be exported from the KMS.
/// - The wrap key file cannot be read.
/// - The key block cannot be wrapped with the wrapping key.
/// - The wrapped key object cannot be written to the output file.
/// - The console output cannot be written.
pub async fn run(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
// read the key file
let mut object = read_object_from_json_ttlv_file(&self.key_file_in)?;


@ -18,6 +18,16 @@ pub enum SymmetricCommands {
}
impl SymmetricCommands {
/// Process the symmetric command and execute the corresponding action.
///
/// # Errors
///
/// This function can return an error if any of the underlying actions encounter an error.
///
/// # Arguments
///
/// * `kms_rest_client` - The KMS client used for communication with the KMS service.
///
pub async fn process(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
match self {
Self::Keys(command) => command.process(kms_rest_client).await?,


@ -10,6 +10,15 @@ use crate::error::result::{CliResult, CliResultHelper};
pub struct ServerVersionAction;
impl ServerVersionAction {
/// Process the server version action.
///
/// # Arguments
///
/// * `kms_rest_client` - The KMS client instance used to communicate with the KMS server.
///
/// # Errors
///
/// Returns an error if the version query fails or if there is an issue writing to the console.
pub async fn process(&self, kms_rest_client: &KmsClient) -> CliResult<()> {
let version = kms_rest_client
.version()


@ -6,9 +6,27 @@ use super::CliError;
pub type CliResult<R> = Result<R, CliError>;
/// Trait for providing helper methods for `CliResult`.
pub trait CliResultHelper<T> {
/// Sets the reason for the error.
///
/// # Errors
///
/// Returns a `CliResult` with the specified `ErrorReason`.
fn reason(self, reason: ErrorReason) -> CliResult<T>;
/// Sets the context for the error.
///
/// # Errors
///
/// Returns a `CliResult` with the specified context.
fn context(self, context: &str) -> CliResult<T>;
/// Sets the context for the error using a closure.
///
/// # Errors
///
/// Returns a `CliResult` with the context returned by the closure.
fn with_context<D, O>(self, op: O) -> CliResult<T>
where
D: Display + Send + Sync + 'static,


@ -6,6 +6,7 @@
let_underscore,
rust_2024_compatibility,
unreachable_pub,
unused,
clippy::all,
clippy::suspicious,
clippy::complexity,
@ -16,7 +17,6 @@
)]
#![allow(
clippy::module_name_repetitions,
clippy::missing_errors_doc,
clippy::too_many_lines,
clippy::cargo_common_metadata,
clippy::multiple_crate_versions


@ -198,7 +198,7 @@ async fn test_certificate_import_encrypt(
true,
)?;
let _subca_certificate_id = import_certificate(
let subca_certificate_id = import_certificate(
&ctx.owner_client_conf_path,
"certificates",
&format!("test_data/certificates/{subca_path}"),
@ -221,7 +221,7 @@ async fn test_certificate_import_encrypt(
None,
None,
Some(private_key_id.clone()),
Some(_subca_certificate_id),
Some(subca_certificate_id),
Some(tags),
None,
false,


@ -178,46 +178,6 @@ pub(crate) async fn test_export_sym_allow_revoked() -> CliResult<()> {
#[cfg(not(feature = "fips"))]
#[tokio::test]
pub(crate) async fn test_export_covercrypt() -> CliResult<()> {
// create a temp dir
let tmp_dir = TempDir::new()?;
let tmp_path = tmp_dir.path();
// init the test server
let ctx = start_default_test_kms_server().await;
// generate a new master key pair
let (master_private_key_id, _master_public_key_id) = create_cc_master_key_pair(
&ctx.owner_client_conf_path,
"--policy-specifications",
"test_data/policy_specifications.json",
&[],
)?;
_export_cc_test(
KeyFormatType::CoverCryptSecretKey,
&master_private_key_id,
tmp_path,
ctx,
)?;
_export_cc_test(
KeyFormatType::CoverCryptPublicKey,
&_master_public_key_id,
tmp_path,
ctx,
)?;
let user_key_id = create_user_decryption_key(
&ctx.owner_client_conf_path,
&master_private_key_id,
"(Department::MKG || Department::FIN) && Security Level::Top Secret",
&[],
)?;
_export_cc_test(
KeyFormatType::CoverCryptSecretKey,
&user_key_id,
tmp_path,
ctx,
)?;
fn _export_cc_test(
key_format_type: KeyFormatType,
key_id: &str,
@ -258,6 +218,46 @@ pub(crate) async fn test_export_covercrypt() -> CliResult<()> {
Ok(())
}
// create a temp dir
let tmp_dir = TempDir::new()?;
let tmp_path = tmp_dir.path();
// init the test server
let ctx = start_default_test_kms_server().await;
// generate a new master key pair
let (master_private_key_id, master_public_key_id) = create_cc_master_key_pair(
&ctx.owner_client_conf_path,
"--policy-specifications",
"test_data/policy_specifications.json",
&[],
)?;
_export_cc_test(
KeyFormatType::CoverCryptSecretKey,
&master_private_key_id,
tmp_path,
ctx,
)?;
_export_cc_test(
KeyFormatType::CoverCryptPublicKey,
&master_public_key_id,
tmp_path,
ctx,
)?;
let user_key_id = create_user_decryption_key(
&ctx.owner_client_conf_path,
&master_private_key_id,
"(Department::MKG || Department::FIN) && Security Level::Top Secret",
&[],
)?;
_export_cc_test(
KeyFormatType::CoverCryptSecretKey,
&user_key_id,
tmp_path,
ctx,
)?;
Ok(())
}
@ -348,12 +348,12 @@ pub(crate) async fn test_export_x25519() -> CliResult<()> {
Some(CryptographicAlgorithm::ECDH)
);
let kv = &key_block.key_value;
let (d, recommended_curve) = match &kv.key_material {
KeyMaterial::TransparentECPrivateKey {
d,
recommended_curve,
} => (d, recommended_curve),
_ => panic!("Invalid key value type"),
let KeyMaterial::TransparentECPrivateKey {
d,
recommended_curve,
} = &kv.key_material
else {
panic!("Invalid key value type");
};
assert_eq!(recommended_curve, &RecommendedCurve::CURVE25519);
let mut d_vec = d.to_bytes_be();
@ -407,12 +407,12 @@ pub(crate) async fn test_export_x25519() -> CliResult<()> {
Some(CryptographicAlgorithm::ECDH)
);
let kv = &key_block.key_value;
let (q_string, recommended_curve) = match &kv.key_material {
KeyMaterial::TransparentECPublicKey {
q_string,
recommended_curve,
} => (q_string, recommended_curve),
_ => panic!("Invalid key value type"),
let KeyMaterial::TransparentECPublicKey {
q_string,
recommended_curve,
} = &kv.key_material
else {
panic!("Invalid key value type")
};
assert_eq!(recommended_curve, &RecommendedCurve::CURVE25519);
let pkey_1 = PKey::public_key_from_raw_bytes(q_string, Id::X25519).unwrap();


@ -15,6 +15,7 @@ use cosmian_kms_client::{
},
},
},
kmip::extra::tagging::EMPTY_TAGS,
read_object_from_json_ttlv_file, write_kmip_object_to_file,
};
use kms_test_server::start_default_test_kms_server;
@ -96,7 +97,7 @@ pub(crate) async fn test_import_export_wrap_rfc_5649() -> CliResult<()> {
None,
None,
None,
&[] as &[&str],
&EMPTY_TAGS,
)?;
test_import_export_wrap_private_key(
&ctx.owner_client_conf_path,
@ -192,7 +193,7 @@ pub(crate) async fn test_import_export_wrap_ecies() -> CliResult<()> {
None,
None,
None,
&[] as &[&str],
&EMPTY_TAGS,
)?;
test_import_export_wrap_private_key(
&ctx.owner_client_conf_path,


@ -6,7 +6,7 @@ use cloudproof::reexport::crypto_core::{
reexport::rand_core::{RngCore, SeedableRng},
CsRng,
};
use cosmian_kms_client::KMS_CLI_CONF_ENV;
use cosmian_kms_client::{kmip::extra::tagging::EMPTY_TAGS, KMS_CLI_CONF_ENV};
use kms_test_server::start_default_test_kms_server;
use super::SUB_COMMAND;
@ -78,7 +78,7 @@ pub(crate) async fn test_create_symmetric_key() -> CliResult<()> {
Some(128),
None,
None,
&[] as &[&str],
&EMPTY_TAGS,
)?;
// AES 256 bit key from a base64 encoded key
rng.fill_bytes(&mut key);
@ -88,7 +88,7 @@ pub(crate) async fn test_create_symmetric_key() -> CliResult<()> {
None,
Some(&key_b64),
None,
&[] as &[&str],
&EMPTY_TAGS,
)?;
}
@ -100,7 +100,7 @@ pub(crate) async fn test_create_symmetric_key() -> CliResult<()> {
None,
None,
Some("chacha20"),
&[] as &[&str],
&EMPTY_TAGS,
)?;
// ChaCha20 128 bit key
create_symmetric_key(
@ -108,7 +108,7 @@ pub(crate) async fn test_create_symmetric_key() -> CliResult<()> {
Some(128),
None,
Some("chacha20"),
&[] as &[&str],
&EMPTY_TAGS,
)?;
// ChaCha20 256 bit key from a base64 encoded key
let mut rng = CsRng::from_entropy();
@ -120,7 +120,7 @@ pub(crate) async fn test_create_symmetric_key() -> CliResult<()> {
None,
Some(&key_b64),
Some("chacha20"),
&[] as &[&str],
&EMPTY_TAGS,
)?;
}
@ -132,7 +132,7 @@ pub(crate) async fn test_create_symmetric_key() -> CliResult<()> {
None,
None,
Some("sha3"),
&[] as &[&str],
&EMPTY_TAGS,
)?;
// ChaCha20 salts
create_symmetric_key(
@ -140,28 +140,28 @@ pub(crate) async fn test_create_symmetric_key() -> CliResult<()> {
Some(224),
None,
Some("sha3"),
&[] as &[&str],
&EMPTY_TAGS,
)?;
create_symmetric_key(
&ctx.owner_client_conf_path,
Some(256),
None,
Some("sha3"),
&[] as &[&str],
&EMPTY_TAGS,
)?;
create_symmetric_key(
&ctx.owner_client_conf_path,
Some(384),
None,
Some("sha3"),
&[] as &[&str],
&EMPTY_TAGS,
)?;
create_symmetric_key(
&ctx.owner_client_conf_path,
Some(512),
None,
Some("sha3"),
&[] as &[&str],
&EMPTY_TAGS,
)?;
// ChaCha20 256 bit salt from a base64 encoded salt
let mut rng = CsRng::from_entropy();
@ -173,7 +173,7 @@ pub(crate) async fn test_create_symmetric_key() -> CliResult<()> {
None,
Some(&key_b64),
Some("sha3"),
&[] as &[&str],
&EMPTY_TAGS,
)?;
}
Ok(())


@ -104,7 +104,7 @@ impl AeadCipher {
CryptographicAlgorithm::ChaCha20 => {
if block_cipher_mode.is_some() {
kmip_bail!(KmipError::NotSupported(
"ChaCha20 is only supported with Pooly1305. Do not specify the Block \
"ChaCha20 is only supported with Poly1305. Do not specify the Block \
Cipher Mode"
.to_owned()
));


@ -157,7 +157,7 @@ fn unwrap_with_private_key(
match private_key.id() {
Id::RSA => unwrap_with_rsa(private_key, key_wrapping_data, ciphertext),
#[cfg(not(feature = "fips"))]
Id::EC | Id::X25519 | Id::ED25519 => ecies_decrypt(&private_key, ciphertext),
Id::EC | Id::X25519 | Id::ED25519 => ecies_decrypt(private_key, ciphertext),
other => {
kmip_bail!(
"Unable to wrap key: wrapping public key type not supported: {:?}",


@ -197,7 +197,7 @@ fn wrap_with_public_key(
match public_key.id() {
Id::RSA => wrap_with_rsa(public_key, key_wrapping_data, key_to_wrap),
#[cfg(not(feature = "fips"))]
Id::EC | Id::X25519 | Id::ED25519 => ecies_encrypt(&public_key, key_to_wrap),
Id::EC | Id::X25519 | Id::ED25519 => ecies_encrypt(public_key, key_to_wrap),
other => Err(kmip_error!(
"Unable to wrap key: wrapping public key type not supported: {other:?}"
)),


@ -482,7 +482,7 @@ mod tests {
fn test_private_key_conversion_pkcs(
private_key: &PKey<Private>,
id: Id,
keysize: u32,
key_size: u32,
kft: KeyFormatType,
) {
#[cfg(feature = "fips")]
@ -511,14 +511,14 @@ mod tests {
if kft == KeyFormatType::PKCS8 {
let private_key_ = PKey::private_key_from_pkcs8(&key_value).unwrap();
assert_eq!(private_key_.id(), id);
assert_eq!(private_key_.bits(), keysize);
assert_eq!(private_key_.bits(), key_size);
assert_eq!(
private_key_.private_key_to_pkcs8().unwrap(),
key_value.to_vec()
);
let private_key_ = kmip_private_key_to_openssl(&object_).unwrap();
assert_eq!(private_key_.id(), id);
assert_eq!(private_key_.bits(), keysize);
assert_eq!(private_key_.bits(), key_size);
assert_eq!(
private_key_.private_key_to_pkcs8().unwrap(),
key_value.to_vec()
@ -526,14 +526,14 @@ mod tests {
} else {
let private_key_ = PKey::private_key_from_der(&key_value).unwrap();
assert_eq!(private_key_.id(), id);
assert_eq!(private_key_.bits(), keysize);
assert_eq!(private_key_.bits(), key_size);
assert_eq!(
private_key_.private_key_to_der().unwrap(),
key_value.to_vec()
);
let private_key_ = kmip_private_key_to_openssl(&object_).unwrap();
assert_eq!(private_key_.id(), id);
assert_eq!(private_key_.bits(), keysize);
assert_eq!(private_key_.bits(), key_size);
assert_eq!(
private_key_.private_key_to_der().unwrap(),
key_value.to_vec()
@ -541,7 +541,7 @@ mod tests {
}
}
fn test_private_key_conversion_sec1(private_key: &PKey<Private>, id: Id, keysize: u32) {
fn test_private_key_conversion_sec1(private_key: &PKey<Private>, id: Id, key_size: u32) {
#[cfg(feature = "fips")]
let mask = Some(FIPS_PRIVATE_ECC_MASK_SIGN_ECDH);
#[cfg(not(feature = "fips"))]
@ -568,14 +568,14 @@ mod tests {
let private_key_ =
PKey::from_ec_key(EcKey::private_key_from_der(&key_value).unwrap()).unwrap();
assert_eq!(private_key_.id(), id);
assert_eq!(private_key_.bits(), keysize);
assert_eq!(private_key_.bits(), key_size);
assert_eq!(
private_key_.private_key_to_der().unwrap(),
key_value.to_vec()
);
let private_key_ = kmip_private_key_to_openssl(&object_).unwrap();
assert_eq!(private_key_.id(), id);
assert_eq!(private_key_.bits(), keysize);
assert_eq!(private_key_.bits(), key_size);
assert_eq!(
private_key_.private_key_to_der().unwrap(),
key_value.to_vec()
@ -585,7 +585,7 @@ mod tests {
fn test_private_key_conversion_transparent_rsa(
private_key: &PKey<Private>,
id: Id,
keysize: u32,
key_size: u32,
) {
#[cfg(feature = "fips")]
let mask = Some(FIPS_PRIVATE_RSA_MASK);
@ -636,7 +636,7 @@ mod tests {
.unwrap();
assert_eq!(private_key_.id(), id);
assert_eq!(private_key_.bits(), keysize);
assert_eq!(private_key_.bits(), key_size);
assert_eq!(
private_key.private_key_to_der().unwrap(),
private_key_.private_key_to_der().unwrap()
@ -644,7 +644,7 @@ mod tests {
let private_key_ = kmip_private_key_to_openssl(&object_).unwrap();
assert_eq!(private_key_.id(), id);
assert_eq!(private_key_.bits(), keysize);
assert_eq!(private_key_.bits(), key_size);
assert_eq!(
private_key.private_key_to_der().unwrap(),
private_key_.private_key_to_der().unwrap()
@ -657,7 +657,7 @@ mod tests {
ec_group: Option<&EcGroup>,
curve: RecommendedCurve,
id: Id,
keysize: u32,
key_size: u32,
) {
#[cfg(feature = "fips")]
let mask = Some(FIPS_PRIVATE_ECC_MASK_SIGN_ECDH);
@ -692,9 +692,9 @@ mod tests {
let mut privkey_vec = d.to_bytes_be();
// privkey size on curve.
let bytes_keysize = 1 + ((keysize as usize - 1) / 8);
let bytes_key_size = 1 + ((key_size as usize - 1) / 8);
pad_be_bytes(&mut privkey_vec, bytes_keysize);
pad_be_bytes(&mut privkey_vec, bytes_key_size);
if id == Id::EC {
let private_key_ = PKey::from_ec_key(
EcKey::from_private_components(
@ -706,14 +706,14 @@ mod tests {
)
.unwrap();
assert_eq!(private_key_.id(), id);
assert_eq!(private_key_.bits(), keysize);
assert_eq!(private_key_.bits(), key_size);
assert_eq!(
private_key.private_key_to_der().unwrap(),
private_key_.private_key_to_der().unwrap()
);
let private_key_ = kmip_private_key_to_openssl(&object_).unwrap();
assert_eq!(private_key_.id(), id);
assert_eq!(private_key_.bits(), keysize);
assert_eq!(private_key_.bits(), key_size);
assert_eq!(
private_key.private_key_to_der().unwrap(),
private_key_.private_key_to_der().unwrap()
@ -721,14 +721,14 @@ mod tests {
} else {
let private_key_ = PKey::private_key_from_raw_bytes(&privkey_vec, id).unwrap();
assert_eq!(private_key_.id(), id);
assert_eq!(private_key_.bits(), keysize);
assert_eq!(private_key_.bits(), key_size);
assert_eq!(
private_key.raw_private_key().unwrap(),
private_key_.raw_private_key().unwrap()
);
let private_key_ = kmip_private_key_to_openssl(&object_).unwrap();
assert_eq!(private_key_.id(), id);
assert_eq!(private_key_.bits(), keysize);
assert_eq!(private_key_.bits(), key_size);
assert_eq!(
private_key.raw_private_key().unwrap(),
private_key_.raw_private_key().unwrap()
@ -742,26 +742,26 @@ mod tests {
// Load FIPS provider module from OpenSSL.
openssl::provider::Provider::load(None, "fips").unwrap();
let keysize = 2048;
let rsa_private_key = Rsa::generate(keysize).unwrap();
let key_size = 2048;
let rsa_private_key = Rsa::generate(key_size).unwrap();
let private_key = PKey::from_rsa(rsa_private_key).unwrap();
test_private_key_conversion_pkcs(&private_key, Id::RSA, keysize, KeyFormatType::PKCS8);
test_private_key_conversion_pkcs(&private_key, Id::RSA, keysize, KeyFormatType::PKCS1);
test_private_key_conversion_transparent_rsa(&private_key, Id::RSA, keysize);
test_private_key_conversion_pkcs(&private_key, Id::RSA, key_size, KeyFormatType::PKCS8);
test_private_key_conversion_pkcs(&private_key, Id::RSA, key_size, KeyFormatType::PKCS1);
test_private_key_conversion_transparent_rsa(&private_key, Id::RSA, key_size);
}
#[test]
#[cfg(not(feature = "fips"))]
fn test_conversion_ec_p_192_private_key() {
let keysize = 192;
let key_size = 192;
let ec_group = EcGroup::from_curve_name(Nid::X9_62_PRIME192V1).unwrap();
let ec_key = EcKey::generate(&ec_group).unwrap();
let ec_public_key = ec_key.public_key().to_owned(&ec_group).unwrap();
let private_key = PKey::from_ec_key(ec_key).unwrap();
test_private_key_conversion_pkcs(&private_key, Id::EC, keysize, KeyFormatType::PKCS8);
test_private_key_conversion_sec1(&private_key, Id::EC, keysize);
test_private_key_conversion_pkcs(&private_key, Id::EC, key_size, KeyFormatType::PKCS8);
test_private_key_conversion_sec1(&private_key, Id::EC, key_size);
test_private_key_conversion_transparent_ec(
&private_key,
@ -769,7 +769,7 @@ mod tests {
Some(&ec_group),
RecommendedCurve::P192,
Id::EC,
keysize,
key_size,
);
}
@ -779,14 +779,14 @@ mod tests {
// Load FIPS provider module from OpenSSL.
openssl::provider::Provider::load(None, "fips").unwrap();
let keysize = 224;
let key_size = 224;
let ec_group = EcGroup::from_curve_name(Nid::SECP224R1).unwrap();
let ec_key = EcKey::generate(&ec_group).unwrap();
let ec_public_key = ec_key.public_key().to_owned(&ec_group).unwrap();
let private_key = PKey::from_ec_key(ec_key).unwrap();
test_private_key_conversion_pkcs(&private_key, Id::EC, keysize, KeyFormatType::PKCS8);
test_private_key_conversion_sec1(&private_key, Id::EC, keysize);
test_private_key_conversion_pkcs(&private_key, Id::EC, key_size, KeyFormatType::PKCS8);
test_private_key_conversion_sec1(&private_key, Id::EC, key_size);
test_private_key_conversion_transparent_ec(
&private_key,
@ -794,7 +794,7 @@ mod tests {
Some(&ec_group),
RecommendedCurve::P224,
Id::EC,
keysize,
key_size,
);
}
@ -804,14 +804,14 @@ mod tests {
// Load FIPS provider module from OpenSSL.
openssl::provider::Provider::load(None, "fips").unwrap();
let keysize = 256;
let key_size = 256;
let ec_group = EcGroup::from_curve_name(openssl::nid::Nid::X9_62_PRIME256V1).unwrap();
let ec_key = EcKey::generate(&ec_group).unwrap();
let ec_public_key = ec_key.public_key().to_owned(&ec_group).unwrap();
let private_key = PKey::from_ec_key(ec_key).unwrap();
test_private_key_conversion_pkcs(&private_key, Id::EC, keysize, KeyFormatType::PKCS8);
test_private_key_conversion_sec1(&private_key, Id::EC, keysize);
test_private_key_conversion_pkcs(&private_key, Id::EC, key_size, KeyFormatType::PKCS8);
test_private_key_conversion_sec1(&private_key, Id::EC, key_size);
test_private_key_conversion_transparent_ec(
&private_key,
@ -819,7 +819,7 @@ mod tests {
Some(&ec_group),
RecommendedCurve::P256,
Id::EC,
keysize,
key_size,
);
}
@ -829,14 +829,14 @@ mod tests {
// Load FIPS provider module from OpenSSL.
openssl::provider::Provider::load(None, "fips").unwrap();
let keysize = 384;
let key_size = 384;
let ec_group = EcGroup::from_curve_name(openssl::nid::Nid::SECP384R1).unwrap();
let ec_key = EcKey::generate(&ec_group).unwrap();
let ec_public_key = ec_key.public_key().to_owned(&ec_group).unwrap();
let private_key = PKey::from_ec_key(ec_key).unwrap();
test_private_key_conversion_pkcs(&private_key, Id::EC, keysize, KeyFormatType::PKCS8);
test_private_key_conversion_sec1(&private_key, Id::EC, keysize);
test_private_key_conversion_pkcs(&private_key, Id::EC, key_size, KeyFormatType::PKCS8);
test_private_key_conversion_sec1(&private_key, Id::EC, key_size);
test_private_key_conversion_transparent_ec(
&private_key,
@ -844,7 +844,7 @@ mod tests {
Some(&ec_group),
RecommendedCurve::P384,
Id::EC,
keysize,
key_size,
);
}
@ -854,14 +854,14 @@ mod tests {
// Load FIPS provider module from OpenSSL.
openssl::provider::Provider::load(None, "fips").unwrap();
let keysize = 521;
let key_size = 521;
let ec_group = EcGroup::from_curve_name(openssl::nid::Nid::SECP521R1).unwrap();
let ec_key = EcKey::generate(&ec_group).unwrap();
let ec_public_key = ec_key.public_key().to_owned(&ec_group).unwrap();
let private_key = PKey::from_ec_key(ec_key).unwrap();
test_private_key_conversion_pkcs(&private_key, Id::EC, keysize, KeyFormatType::PKCS8);
test_private_key_conversion_sec1(&private_key, Id::EC, keysize);
test_private_key_conversion_pkcs(&private_key, Id::EC, key_size, KeyFormatType::PKCS8);
test_private_key_conversion_sec1(&private_key, Id::EC, key_size);
test_private_key_conversion_transparent_ec(
&private_key,
@ -869,24 +869,24 @@ mod tests {
Some(&ec_group),
RecommendedCurve::P521,
Id::EC,
keysize,
key_size,
);
}
#[test]
#[cfg(not(feature = "fips"))]
fn test_conversion_ec_x25519_private_key() {
let keysize = 253;
let key_size = 253;
let private_key = PKey::generate_x25519().unwrap();
test_private_key_conversion_pkcs(&private_key, Id::X25519, keysize, KeyFormatType::PKCS8);
test_private_key_conversion_pkcs(&private_key, Id::X25519, key_size, KeyFormatType::PKCS8);
test_private_key_conversion_transparent_ec(
&private_key,
None,
None,
RecommendedCurve::CURVE25519,
Id::X25519,
keysize,
key_size,
);
}
@ -896,34 +896,34 @@ mod tests {
// Load FIPS provider module from OpenSSL.
openssl::provider::Provider::load(None, "fips").unwrap();
let keysize = 256;
let key_size = 256;
let private_key = PKey::generate_ed25519().unwrap();
test_private_key_conversion_pkcs(&private_key, Id::ED25519, keysize, KeyFormatType::PKCS8);
test_private_key_conversion_pkcs(&private_key, Id::ED25519, key_size, KeyFormatType::PKCS8);
test_private_key_conversion_transparent_ec(
&private_key,
None,
None,
RecommendedCurve::CURVEED25519,
Id::ED25519,
keysize,
key_size,
);
}
#[test]
#[cfg(not(feature = "fips"))]
fn test_conversion_ec_x448_private_key() {
let keysize = 448;
let key_size = 448;
let private_key = PKey::generate_x448().unwrap();
test_private_key_conversion_pkcs(&private_key, Id::X448, keysize, KeyFormatType::PKCS8);
test_private_key_conversion_pkcs(&private_key, Id::X448, key_size, KeyFormatType::PKCS8);
test_private_key_conversion_transparent_ec(
&private_key,
None,
None,
RecommendedCurve::CURVE448,
Id::X448,
keysize,
key_size,
);
}
@ -933,17 +933,17 @@ mod tests {
// Load FIPS provider module from OpenSSL.
openssl::provider::Provider::load(None, "fips").unwrap();
let keysize = 456;
let key_size = 456;
let private_key = PKey::generate_ed448().unwrap();
test_private_key_conversion_pkcs(&private_key, Id::ED448, keysize, KeyFormatType::PKCS8);
test_private_key_conversion_pkcs(&private_key, Id::ED448, key_size, KeyFormatType::PKCS8);
test_private_key_conversion_transparent_ec(
&private_key,
None,
None,
RecommendedCurve::CURVEED448,
Id::ED448,
keysize,
key_size,
);
}
}


@ -107,6 +107,7 @@ tracing-opentelemetry = "0.24.0"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
url = { workspace = true }
uuid = { workspace = true, features = ["v4"] }
version-compare = "0.2.0"
x509-parser = { workspace = true }
zeroize = { workspace = true }
@ -114,6 +115,7 @@ zeroize = { workspace = true }
actix-http = "3.6"
cosmian_logger = { path = "../logger" }
pem = "3.0.4"
tempfile = "3.11"
[build-dependencies]
actix-http = "3.6"


@ -201,7 +201,7 @@ impl DBConfig {
fn ensure_url(database_url: Option<&str>, alternate_env_variable: &str) -> KResult<Url> {
let url = if let Some(url) = database_url {
Ok(url.to_string())
Ok(url.to_owned())
} else {
std::env::var(alternate_env_variable).map_err(|_e| {
kms_error!(
@ -220,7 +220,7 @@ fn ensure_value(
env_variable_name: &str,
) -> KResult<String> {
if let Some(value) = value {
Ok(value.to_string())
Ok(value.to_owned())
} else {
std::env::var(env_variable_name).map_err(|_e| {
kms_error!(


@ -65,7 +65,7 @@ impl Default for HttpConfig {
fn default() -> Self {
Self {
port: DEFAULT_PORT,
hostname: DEFAULT_HOSTNAME.to_string(),
hostname: DEFAULT_HOSTNAME.to_owned(),
https_p12_file: None,
https_p12_password: None,
authority_cert_file: None,


@ -63,14 +63,16 @@ impl Display for DbParams {
}
/// Redact the username and password from the URL for logging purposes
#[allow(clippy::expect_used)]
fn redact_url(original: &Url) -> Url {
let mut url = original.clone();
if url.username() != "" {
url.set_username("****").unwrap();
url.set_username("****").expect("masking username failed");
}
if url.password().is_some() {
url.set_password(Some("****")).unwrap();
url.set_password(Some("****"))
.expect("masking password failed");
}
url


@ -1,4 +1,4 @@
use std::{fmt, fs::File, io::Read};
use std::fmt;
use openssl::pkcs12::{ParsedPkcs12_2, Pkcs12};
@ -13,16 +13,28 @@ pub enum HttpParams {
Http,
}
/// Represents the HTTP parameters for the server configuration.
impl HttpParams {
/// Tries to create an instance of `HttpParams` from the given `HttpConfig`.
///
/// # Arguments
///
/// * `config` - The `HttpConfig` object containing the configuration parameters.
///
/// # Returns
///
/// Returns a `KResult` containing the created `HttpParams` instance on success.
///
/// # Errors
///
/// This function can return an error if there is an issue reading the PKCS#12 file or parsing it.
pub fn try_from(config: &HttpConfig) -> KResult<Self> {
// start in HTTPS mode if a PKCS#12 file is provided
if let (Some(p12_file), Some(p12_password)) =
(&config.https_p12_file, &config.https_p12_password)
{
// Open and read the file into a byte vector
let mut file = File::open(p12_file)?;
let mut der_bytes = Vec::new();
file.read_to_end(&mut der_bytes)?;
let der_bytes = std::fs::read(p12_file)?;
// Parse the byte vector as a PKCS#12 object
let sealed_p12 = Pkcs12::from_der(der_bytes.as_slice())?;
let p12 = sealed_p12
@ -35,6 +47,11 @@ impl HttpParams {
}
}
/// Checks if the server is running in HTTPS mode.
///
/// # Returns
///
/// Returns `true` if the server is running in HTTPS mode, `false` otherwise.
#[must_use]
pub const fn is_running_https(&self) -> bool {
matches!(self, Self::Https(_))


@ -1,4 +1,4 @@
use std::{fmt, fs::File, io::Read, path::PathBuf};
use std::{fmt, path::PathBuf};
use openssl::x509::X509;
@ -59,7 +59,21 @@ pub struct ServerParams {
pub ms_dke_service_url: Option<String>,
}
/// Represents the server parameters.
impl ServerParams {
/// Tries to create a `ServerParams` instance from the given `ClapConfig`.
///
/// # Arguments
///
/// * `conf` - The `ClapConfig` object containing the configuration parameters.
///
/// # Returns
///
/// Returns a `KResult` containing the `ServerParams` instance if successful, or an error if the conversion fails.
///
/// # Errors
///
/// Returns an error if the conversion from `ClapConfig` to `ServerParams` fails.
pub fn try_from(conf: ClapConfig) -> KResult<Self> {
let http_params = HttpParams::try_from(&conf.http)?;
@ -95,11 +109,22 @@ impl ServerParams {
})
}
/// Loads the certificate from the given file path.
///
/// # Arguments
///
/// * `authority_cert_file` - The path to the authority certificate file.
///
/// # Returns
///
/// Returns a `KResult` containing the loaded `X509` certificate if successful, or an error if the loading fails.
///
/// # Errors
///
/// Returns an error if the certificate file cannot be read or if the parsing of the certificate fails.
fn load_cert(authority_cert_file: &PathBuf) -> KResult<X509> {
// Open and read the file into a byte vector
let mut file = File::open(authority_cert_file)?;
let mut pem_bytes = Vec::new();
file.read_to_end(&mut pem_bytes)?;
let pem_bytes = std::fs::read(authority_cert_file)?;
// Parse the byte vector as a X509 object
let x509 = X509::from_pem(pem_bytes.as_slice())?;


@ -93,7 +93,7 @@ pub(crate) async fn retrieve_issuer_private_key_and_certificate(
kms_bail!(KmsError::InvalidRequest(
"Either an issuer certificate id or an issuer private key id or both must be provided"
.to_string(),
.to_owned(),
))
}
@ -120,7 +120,7 @@ pub(crate) async fn retrieve_certificate_for_private_key(
.attributes
.get_link(LinkType::PublicKeyLink)
.ok_or_else(|| {
KmsError::InvalidRequest("No public key link found for the private key".to_string())
KmsError::InvalidRequest("No public key link found for the private key".to_owned())
})?;
find_link_in_public_key(
LinkType::CertificateLink,
@ -179,7 +179,7 @@ pub(crate) async fn retrieve_private_key_for_certificate(
.get_link(LinkType::PublicKeyLink)
.ok_or_else(|| {
KmsError::InvalidRequest(
"No private or public key link found for the certificate".to_string(),
"No private or public key link found for the certificate".to_owned(),
)
})?;
find_link_in_public_key(


@ -58,7 +58,7 @@ async fn create_user_decryption_key_(
.ok_or_else(|| {
KmsError::InvalidRequest(
"there should be a reference to the master private key in the creation attributes"
.to_string(),
.to_owned(),
)
})?
.to_string();
@ -130,7 +130,7 @@ pub(crate) async fn create_user_decryption_key_pair(
.or(create_key_pair_request.common_attributes.as_ref())
.ok_or_else(|| {
KmsError::InvalidRequest(
"Missing private attributes in CoverCrypt Create Keypair request".to_string(),
"Missing private attributes in CoverCrypt Create Keypair request".to_owned(),
)
})?;
let private_key = create_user_decryption_key_(
@ -149,13 +149,13 @@ pub(crate) async fn create_user_decryption_key_pair(
.or(create_key_pair_request.common_attributes.as_ref())
.ok_or_else(|| {
KmsError::InvalidRequest(
"Missing public attributes in CoverCrypt Create Keypair request".to_string(),
"Missing public attributes in CoverCrypt Create Keypair request".to_owned(),
)
})?;
let master_public_key_uid = public_key_attributes.get_parent_id().ok_or_else(|| {
KmsError::InvalidRequest(
"the master public key id should be available in the public creation attributes"
.to_string(),
.to_owned(),
)
})?;
let gr_public_key = kmip_server


@ -190,7 +190,7 @@ async fn get_master_keys_and_policy(
.ok_or_else(|| {
KmsError::KmipError(
ErrorReason::Invalid_Object_Type,
"Private key MUST contain a public key link".to_string(),
"Private key MUST contain a public key link".to_owned(),
)
})?;
@ -212,7 +212,7 @@ async fn import_rekeyed_master_keys(
) -> KResult<()> {
// re-import master secret key
let import_request = Import {
unique_identifier: UniqueIdentifier::TextString(msk.0.to_string()),
unique_identifier: UniqueIdentifier::TextString(msk.0),
object_type: ObjectType::PrivateKey,
replace_existing: Some(true),
key_wrap_type: None,
@ -223,7 +223,7 @@ async fn import_rekeyed_master_keys(
// re-import master public key
let import_request = Import {
unique_identifier: UniqueIdentifier::TextString(mpk.0.to_string()),
unique_identifier: UniqueIdentifier::TextString(mpk.0),
object_type: ObjectType::PublicKey,
replace_existing: Some(true),
key_wrap_type: None,


@ -26,14 +26,19 @@ impl<'de> Deserialize<'de> for ExtraDatabaseParams {
D: serde::Deserializer<'de>,
{
let bytes = Zeroizing::from(<Vec<u8>>::deserialize(deserializer)?);
let group_id_bytes: [u8; 16] = bytes[0..16]
.try_into()
.map_err(|_| serde::de::Error::custom("Could not deserialize ExtraDatabaseParams"))?;
let group_id_bytes: [u8; 16] = bytes[0..16].try_into().map_err(|e| {
serde::de::Error::custom(format!(
"Could not deserialize ExtraDatabaseParams. Error: {e:?}"
))
})?;
let group_id = u128::from_be_bytes(group_id_bytes);
let mut key_bytes: [u8; AES_256_GCM_KEY_LENGTH] = bytes[16..48]
.try_into()
.map_err(|_| serde::de::Error::custom("Could not deserialize ExtraDatabaseParams"))?;
let mut key_bytes: [u8; AES_256_GCM_KEY_LENGTH] =
bytes[16..48].try_into().map_err(|e| {
serde::de::Error::custom(format!(
"Could not deserialize ExtraDatabaseParams. Error: {e:?}"
))
})?;
let key = Secret::<AES_256_GCM_KEY_LENGTH>::from_unprotected_bytes(&mut key_bytes);
Ok(Self { group_id, key })
}


@ -60,7 +60,7 @@ impl KMS {
DbParams::RedisFindex(url, master_key, label) => {
// There is no reason to keep a copy of the key in the shared config
// So we are going to create a "zeroizable" copy which will be passed to Redis with Findex
// and zerorize the one in the shared config
// and zeroize the one in the shared config
let new_master_key =
Secret::<REDIS_WITH_FINDEX_MASTER_KEY_LENGTH>::from_unprotected_bytes(
&mut master_key.to_bytes(),
@ -94,7 +94,7 @@ impl KMS {
// check that the cryptographic algorithm is specified
let cryptographic_algorithm = &attributes.cryptographic_algorithm.ok_or_else(|| {
KmsError::InvalidRequest(
"the cryptographic algorithm must be specified for secret key creation".to_string(),
"the cryptographic algorithm must be specified for secret key creation".to_owned(),
)
})?;
@ -102,7 +102,7 @@ impl KMS {
let mut tags = attributes.get_tags();
Attributes::check_user_tags(&tags)?;
//update the tags
tags.insert("_kk".to_string());
tags.insert("_kk".to_owned());
match cryptographic_algorithm {
CryptographicAlgorithm::AES
@ -115,14 +115,15 @@ impl KMS {
| CryptographicAlgorithm::SHAKE128
| CryptographicAlgorithm::SHAKE256 => match attributes.key_format_type {
None => Err(KmsError::InvalidRequest(
"Unable to create a symmetric key, the format type is not specified"
.to_string(),
"Unable to create a symmetric key, the format type is not specified".to_owned(),
)),
Some(KeyFormatType::TransparentSymmetricKey) => {
// create the key
let key_len: usize = attributes
let key_len = attributes
.cryptographic_length
.map_or(AES_256_GCM_KEY_LENGTH, |v| v as usize / 8);
.map(|len| usize::try_from(len / 8))
.transpose()?
.map_or(AES_256_GCM_KEY_LENGTH, |v| v);
let mut symmetric_key = Zeroizing::from(vec![0; key_len]);
rand_bytes(&mut symmetric_key)?;
let object =
@ -159,8 +160,7 @@ impl KMS {
// check that the cryptographic algorithm is specified
let cryptographic_algorithm = &attributes.cryptographic_algorithm.ok_or_else(|| {
KmsError::InvalidRequest(
"the cryptographic algorithm must be specified for private key creation"
.to_string(),
"the cryptographic algorithm must be specified for private key creation".to_owned(),
)
})?;
@ -168,7 +168,7 @@ impl KMS {
let mut tags = attributes.get_tags();
Attributes::check_user_tags(&tags)?;
//update the tags
tags.insert("_uk".to_string());
tags.insert("_uk".to_owned());
match &cryptographic_algorithm {
CryptographicAlgorithm::CoverCrypt => {


@ -553,7 +553,7 @@ impl KMS {
if owner == access.user_id {
kms_bail!(KmsError::Unauthorized(
"You can't grant yourself, you have already all rights on your own objects"
.to_string()
.to_owned()
))
}
@ -595,7 +595,7 @@ impl KMS {
if owner == access.user_id {
kms_bail!(KmsError::Unauthorized(
"You can't revoke yourself, you should keep all rights on your own objects"
.to_string()
.to_owned()
))
}


@ -258,7 +258,7 @@ async fn get_subject(
CertificateRequestType::PEM => X509Req::from_pem(pkcs10_bytes),
CertificateRequestType::PKCS10 => X509Req::from_der(pkcs10_bytes),
CertificateRequestType::CRMF => kms_bail!(KmsError::InvalidRequest(
"Certificate Request Type CRMF not supported".to_string()
"Certificate Request Type CRMF not supported".to_owned()
)),
}?;
let certificate_id = request
@ -312,7 +312,7 @@ async fn get_subject(
let attributes = request.attributes.as_ref().ok_or_else(|| {
KmsError::InvalidRequest(
"Certify from Subject: the attributes specifying the the subject name are missing"
.to_string(),
.to_owned(),
)
})?;
let subject_name = attributes
@ -320,7 +320,7 @@ async fn get_subject(
.as_ref()
.ok_or_else(|| {
KmsError::InvalidRequest(
"Certify from Subject: the subject name is not found in the attributes".to_string(),
"Certify from Subject: the subject name is not found in the attributes".to_owned(),
)
})?
.subject_name()?;
@ -342,7 +342,7 @@ async fn get_subject(
let (private_attributes, public_attributes) = {
let cryptographic_algorithm = attributes.cryptographic_algorithm.ok_or_else(|| {
KmsError::InvalidRequest(
"Keypair creation: the cryptographic algorithm is missing".to_string(),
"Keypair creation: the cryptographic algorithm is missing".to_owned(),
)
})?;
let private_attributes = Attributes {
@ -410,7 +410,10 @@ async fn get_issuer<'a>(
}
None => (None, None),
};
trace!(
"Issuer certificate id: {issuer_certificate_id:?}, issuer private key id: \
{issuer_private_key_id:?}"
);
if issuer_certificate_id.is_none() && issuer_private_key_id.is_none() {
// If no issuer is provided, the subject is self-signed
return issuer_for_self_signed_certificate(subject, kms, user, params).await;
@ -479,7 +482,7 @@ async fn issuer_for_self_signed_certificate<'a>(
.ok_or_else(|| {
KmsError::InvalidRequest(
"No private key linked to the certificate found to renew it as self-signed"
.to_string(),
.to_owned(),
)
})?;
Ok(Issuer::PrivateKeyAndCertificate(
@ -503,7 +506,7 @@ async fn issuer_for_self_signed_certificate<'a>(
KmsError::InvalidRequest(
"No private key link found to create a self-signed certificate from a public \
key"
.to_string(),
.to_owned(),
)
})?;
// see if we can find an existing certificate to link to the public key
@ -543,7 +546,7 @@ fn build_and_sign_certificate(
issuer: &Issuer,
subject: &Subject,
request: Certify,
) -> Result<(Object, HashSet<String>, Attributes), KmsError> {
) -> KResult<(Object, HashSet<String>, Attributes)> {
debug!("Building and signing certificate");
// recover the attributes
let mut attributes = request.attributes.unwrap_or_default();
@ -566,10 +569,15 @@ fn build_and_sign_certificate(
// Create a new Asn1Time object for the current time
let now = Asn1Time::days_from_now(0).context("could not get a date in ASN.1")?;
// retrieve the number of days for the validity of the certificate
let mut number_of_days = attributes.extract_requested_validity_days()?.unwrap_or(365) as u32;
let mut number_of_days =
u32::try_from(attributes.extract_requested_validity_days()?.unwrap_or(365))?;
trace!("Number of days: {}", number_of_days);
// the number of days cannot exceed that of the issuer certificate
if let Some(issuer_not_after) = issuer.not_after() {
number_of_days = min(issuer_not_after.diff(&now)?.days as u32, number_of_days);
trace!("Issuer certificate not after: {issuer_not_after}");
let days = u32::try_from(now.diff(issuer_not_after)?.days)?;
number_of_days = min(days, number_of_days);
}
x509_builder.set_not_before(now.as_ref())?;
x509_builder.set_not_after(
@ -611,7 +619,7 @@ fn build_and_sign_certificate(
// add subject tags if any
tags.extend(subject.tags().iter().cloned());
// add the certificate "system" tag
tags.insert("_cert".to_string());
tags.insert("_cert".to_owned());
// link the certificate to the issuer certificate
attributes.add_link(

View file

@ -106,9 +106,9 @@ pub(crate) fn generate_key_pair_and_tags(
Attributes::check_user_tags(&tags)?;
// Update the tags for the private key and the public key.
let mut sk_tags = tags.clone();
sk_tags.insert("_sk".to_string());
sk_tags.insert("_sk".to_owned());
let mut pk_tags = tags;
pk_tags.insert("_pk".to_string());
pk_tags.insert("_pk".to_owned());
// Grab whatever attributes were supplied on the create request.
let any_attributes = Some(&common_attributes)
@ -133,7 +133,7 @@ pub(crate) fn generate_key_pair_and_tags(
// Check that the cryptographic algorithm is specified.
let cryptographic_algorithm = any_attributes.cryptographic_algorithm.ok_or_else(|| {
KmsError::InvalidRequest(
"the cryptographic algorithm must be specified for key pair creation".to_string(),
"the cryptographic algorithm must be specified for key pair creation".to_owned(),
)
})?;
@ -195,7 +195,7 @@ pub(crate) fn generate_key_pair_and_tags(
|| cryptographic_algorithm == CryptographicAlgorithm::EC
{
kms_bail!(KmsError::NotSupported(
"Edwards curve can't be created for EC or ECDSA".to_string()
"Edwards curve can't be created for EC or ECDSA".to_owned()
))
}
warn!(
@ -217,7 +217,7 @@ pub(crate) fn generate_key_pair_and_tags(
kms_bail!(KmsError::NotSupported(
"An Edwards Keypair on curve 25519 should not be requested to perform \
Elliptic Curves operations in FIPS mode"
.to_string()
.to_owned()
))
}
#[cfg(not(feature = "fips"))]
@ -226,7 +226,7 @@ pub(crate) fn generate_key_pair_and_tags(
|| cryptographic_algorithm == CryptographicAlgorithm::EC
{
kms_bail!(KmsError::NotSupported(
"Edwards curve can't be created for EC or ECDSA".to_string()
"Edwards curve can't be created for EC or ECDSA".to_owned()
))
}
warn!(
@ -248,7 +248,7 @@ pub(crate) fn generate_key_pair_and_tags(
kms_bail!(KmsError::NotSupported(
"An Edwards Keypair on curve 448 should not be requested to perform ECDH \
in FIPS mode."
.to_string()
.to_owned()
))
}
other => kms_bail!(KmsError::NotSupported(format!(
@ -260,7 +260,7 @@ pub(crate) fn generate_key_pair_and_tags(
let key_size_in_bits = u32::try_from(
any_attributes
.cryptographic_length
.ok_or_else(|| KmsError::InvalidRequest("RSA key size: error".to_string()))?,
.ok_or_else(|| KmsError::InvalidRequest("RSA key size: error".to_owned()))?,
)?;
trace!("RSA key pair generation: size in bits: {key_size_in_bits}");

View file

@ -203,7 +203,7 @@ fn dispatch_decrypt(request: &Decrypt, owm: &ObjectWithMetadata) -> KResult<Decr
.unwrap_or(EMPTY_SLICE);
let plaintext = aead_decrypt(aead, &key_bytes, nonce, aad, ciphertext, tag)?;
Ok(DecryptResponse {
unique_identifier: UniqueIdentifier::TextString(owm.id.to_string()),
unique_identifier: UniqueIdentifier::TextString(owm.id.clone()),
data: Some(plaintext),
correlation_value: request.correlation_value.clone(),
})
@ -234,7 +234,7 @@ fn decrypt_with_pkey(
}
};
Ok(DecryptResponse {
unique_identifier: UniqueIdentifier::TextString(key_id.to_string()),
unique_identifier: UniqueIdentifier::TextString(key_id.to_owned()),
data: Some(plaintext),
correlation_value: request.correlation_value.clone(),
})

View file

@ -78,7 +78,7 @@ async fn get_key(
.ok_or(KmsError::UnsupportedPlaceholder)?
.as_str()
.context("Encrypt: the unique identifier or tags must be a string")?
.to_string();
.to_owned();
trace!("operations::encrypt: uid_or_tags: {uid_or_tags}");
// retrieve from tags or use passed identifier
@ -178,7 +178,7 @@ fn encrypt_with_aead(request: &Encrypt, owm: &ObjectWithMetadata) -> KResult<Enc
.unwrap_or(EMPTY_SLICE);
let (ciphertext, tag) = aead_encrypt(aead, &key_bytes, &nonce, aad, plaintext)?;
Ok(EncryptResponse {
unique_identifier: UniqueIdentifier::TextString(owm.id.to_string()),
unique_identifier: UniqueIdentifier::TextString(owm.id.clone()),
data: Some(ciphertext),
iv_counter_nonce: Some(nonce),
correlation_value: request.correlation_value.clone(),
@ -254,7 +254,7 @@ fn encrypt_with_pkey(
}
};
Ok(EncryptResponse {
unique_identifier: UniqueIdentifier::TextString(key_id.to_string()),
unique_identifier: UniqueIdentifier::TextString(key_id.to_owned()),
data: Some(ciphertext),
iv_counter_nonce: None,
correlation_value: request.correlation_value.clone(),

View file

@ -98,7 +98,7 @@ pub(crate) async fn process_symmetric_key(
Attributes::check_user_tags(tags)?;
// Insert the tag corresponding to the object type if tags should be
// updated.
tags.insert("_kk".to_string());
tags.insert("_kk".to_owned());
}
// check if the object will be replaced if it already exists
@ -139,7 +139,7 @@ fn process_certificate(request: Import) -> Result<(String, Vec<AtomicOperation>)
Attributes::check_user_tags(tags)?;
// Insert the tag corresponding to the object type if tags should be
// updated.
tags.insert("_cert".to_string());
tags.insert("_cert".to_owned());
}
// check if the object will be replaced if it already exists
@ -213,7 +213,7 @@ async fn process_public_key(
let mut tags = attributes.remove_tags();
if let Some(tags) = tags.as_mut() {
Attributes::check_user_tags(tags)?;
tags.insert("_pk".to_string());
tags.insert("_pk".to_owned());
}
// check if the object will be replaced if it already exists
@ -384,7 +384,7 @@ fn private_key_from_openssl(
let sk_uid = if request_uid.is_empty() {
Uuid::new_v4().to_string()
} else {
request_uid.to_string()
request_uid.to_owned()
};
let sk_key_block = sk.key_block_mut()?;
@ -399,7 +399,7 @@ fn private_key_from_openssl(
);
let sk_tags = user_tags.map(|mut tags| {
tags.insert("_sk".to_string());
tags.insert("_sk".to_owned());
tags
});
Ok((sk_uid, sk, sk_tags))
@ -460,7 +460,7 @@ fn process_pkcs12(
// build the private key
let (private_key_id, mut private_key, private_key_tags) = {
let openssl_sk = pkcs12.pkey.ok_or_else(|| {
KmsError::InvalidRequest("Private key not found in PKCS12".to_string())
KmsError::InvalidRequest("Private key not found in PKCS12".to_owned())
})?;
private_key_from_openssl(
&openssl_sk,
@ -479,12 +479,12 @@ fn process_pkcs12(
) = {
// Recover the PKCS12 X509 certificate
let openssl_cert = pkcs12.cert.ok_or_else(|| {
KmsError::InvalidRequest("X509 certificate not found in PKCS12".to_string())
KmsError::InvalidRequest("X509 certificate not found in PKCS12".to_owned())
})?;
// insert the tag corresponding to the object type if tags should be updated
let mut leaf_certificate_tags = user_tags.clone().unwrap_or_default();
leaf_certificate_tags.insert("_cert".to_string());
leaf_certificate_tags.insert("_cert".to_owned());
// convert to KMIP
let leaf_certificate = openssl_certificate_to_kmip(&openssl_cert)?;
@ -504,7 +504,7 @@ fn process_pkcs12(
for openssl_cert in cas {
// insert the tag corresponding to the object type if tags should be updated
let mut chain_certificate_tags = user_tags.clone().unwrap_or_default();
chain_certificate_tags.insert("_cert".to_string());
chain_certificate_tags.insert("_cert".to_owned());
// convert to KMIP
let chain_certificate = openssl_certificate_to_kmip(&openssl_cert)?;

View file

@ -40,7 +40,7 @@ pub(crate) async fn locate(
trace!("UIDs: {:?}", uids);
let response = LocateResponse {
located_items: Some(uids.len() as i32),
located_items: Some(i32::try_from(uids.len())?),
unique_identifiers: if uids.is_empty() { None } else { Some(uids) },
};

View file

@ -70,11 +70,11 @@ pub(crate) async fn message(
let response_message = MessageResponse {
header: MessageResponseHeader {
protocol_version: request.header.protocol_version,
batch_count: response_items.len() as u32,
batch_count: u32::try_from(response_items.len())?,
client_correlation_value: None,
server_correlation_value: None,
attestation_type: None,
timestamp: chrono::Utc::now().timestamp() as u64,
timestamp: u64::try_from(chrono::Utc::now().timestamp())?,
nonce: None,
server_hashed_password: None,
},

View file

@ -58,7 +58,7 @@ pub(crate) async fn rekey(
// there can only be one private key
let owm = owm_s
.pop()
.ok_or_else(|| KmsError::KmipError(ErrorReason::Item_Not_Found, uid_or_tags.to_string()))?;
.ok_or_else(|| KmsError::KmipError(ErrorReason::Item_Not_Found, uid_or_tags.to_owned()))?;
if !owm_s.is_empty() {
return Err(KmsError::InvalidRequest(format!(

View file

@ -71,7 +71,7 @@ pub(crate) async fn rekey_keypair(
// there can only be one private key
let owm = owm_s
.pop()
.ok_or_else(|| KmsError::KmipError(ErrorReason::Item_Not_Found, uid_or_tags.to_string()))?;
.ok_or_else(|| KmsError::KmipError(ErrorReason::Item_Not_Found, uid_or_tags.to_owned()))?;
if !owm_s.is_empty() {
return Err(KmsError::InvalidRequest(format!(
@ -90,7 +90,7 @@ pub(crate) async fn rekey_keypair(
kms_bail!(KmsError::InvalidRequest(
"The cryptographic algorithm must be specified in the private key attributes for key \
pair creation"
.to_string()
.to_owned()
))
}
}

View file

@ -53,7 +53,7 @@ pub(crate) async fn revoke_operation(
.await?;
Ok(RevokeResponse {
unique_identifier: UniqueIdentifier::TextString(uid_or_tags.to_string()),
unique_identifier: UniqueIdentifier::TextString(uid_or_tags.to_owned()),
})
}

View file

@ -81,7 +81,7 @@ pub(crate) async fn validate_operation(
{
(None, None) => {
return Err(KmsError::Certificate(
"Empty chain cannot be validated".to_string(),
"Empty chain cannot be validated".to_owned(),
));
}
(None, Some(certificates)) => Ok::<_, KmsError>((certificates.clone(), certificates.len())),
@ -112,7 +112,7 @@ pub(crate) async fn validate_operation(
return Err(KmsError::Certificate(
"Number of certificates found in database and number of certificates in request do \
not match"
.to_string(),
.to_owned(),
));
};
@ -249,7 +249,7 @@ fn sort_certificates(certificates: &[X509]) -> KResult<Vec<X509>> {
if sorted_chains.is_empty() {
return Err(KmsError::Certificate(
"No root authority found, cannot proceed full chain validation".to_string(),
"No root authority found, cannot proceed full chain validation".to_owned(),
));
}
@ -310,7 +310,7 @@ fn sort_certificates(certificates: &[X509]) -> KResult<Vec<X509>> {
if sorted_chains.len() != certificates.len() {
return Err(KmsError::Certificate(
"Failed to sort the certificates. Certificate chain incomplete?".to_string(),
"Failed to sort the certificates. Certificate chain incomplete?".to_owned(),
));
}
@ -343,7 +343,7 @@ fn sort_certificates(certificates: &[X509]) -> KResult<Vec<X509>> {
fn verify_chain_signature(certificates: &[X509]) -> KResult<ValidityIndicator> {
if certificates.is_empty() {
return Err(KmsError::Certificate(
"Certificate chain is empty".to_string(),
"Certificate chain is empty".to_owned(),
));
}
@ -352,7 +352,7 @@ fn verify_chain_signature(certificates: &[X509]) -> KResult<ValidityIndicator> {
// Get leaf
let leaf = certificates.last().ok_or_else(|| {
KmsError::Certificate("Failed to get last element of the chain".to_string())
KmsError::Certificate("Failed to get last element of the chain".to_owned())
})?;
// Add authorities to the store
@ -378,7 +378,7 @@ fn verify_chain_signature(certificates: &[X509]) -> KResult<ValidityIndicator> {
if !result {
return Err(KmsError::Certificate(
"Result of the function verify_cert: {result:?}".to_string(),
"Result of the function verify_cert: {result:?}".to_owned(),
));
}
@ -386,7 +386,7 @@ fn verify_chain_signature(certificates: &[X509]) -> KResult<ValidityIndicator> {
let mut issuer_public_key = certificates
.first()
.ok_or_else(|| {
KmsError::Certificate("Failed to get the first element of the chain".to_string())
KmsError::Certificate("Failed to get the first element of the chain".to_owned())
})?
.public_key()?;
for cert in certificates {
@ -441,10 +441,10 @@ async fn get_crl_bytes(uri_list: Vec<String>) -> KResult<HashMap<String, Vec<u8>
} else {
let path_buf = path::Path::new(&uri).canonicalize()?;
match path_buf.to_str() {
Some(s) => Some(UriType::Path(s.to_string())),
Some(s) => Some(UriType::Path(s.to_owned())),
None => {
return Err(KmsError::Certificate(
"The uri provided is invalid".to_string(),
"The uri provided is invalid".to_owned(),
))
}
}
@ -499,7 +499,7 @@ async fn get_crl_bytes(uri_list: Vec<String>) -> KResult<HashMap<String, Vec<u8>
}
_ => {
return Err(KmsError::Certificate(
"Error that should not manifest".to_string(),
"Error that should not manifest".to_owned(),
))
}
};
@ -555,7 +555,7 @@ async fn verify_crls(certificates: Vec<X509>) -> KResult<ValidityIndicator> {
debug!("Parent CRL verification: revocation status: {res:?}");
if res == ValidityIndicator::Invalid {
return Err(KmsError::Certificate(
"Certificate is revoked or removed from CRL".to_string(),
"Certificate is revoked or removed from CRL".to_owned(),
));
}
}
@ -571,8 +571,8 @@ async fn verify_crls(certificates: Vec<X509>) -> KResult<ValidityIndicator> {
.and_then(|x| x.get(0))
.and_then(GeneralNameRef::uri);
if let Some(crl_uri) = crl_uri {
if !uri_list.contains(&crl_uri.to_string()) {
uri_list.push(crl_uri.to_string());
if !uri_list.contains(&crl_uri.to_owned()) {
uri_list.push(crl_uri.to_owned());
trace!("Found CRL URI: {crl_uri}");
}
}
@ -602,7 +602,7 @@ async fn verify_crls(certificates: Vec<X509>) -> KResult<ValidityIndicator> {
debug!("Revocation status: result: {res:?}");
if res == ValidityIndicator::Invalid {
return Err(KmsError::Certificate(
"Certificate is revoked or removed from CRL".to_string(),
"Certificate is revoked or removed from CRL".to_owned(),
));
}
}
@ -624,7 +624,7 @@ async fn certificates_by_uid(
let mut results = Vec::new();
for unique_identifier in unique_identifiers {
let unique_identifier = unique_identifier.as_str().ok_or_else(|| {
KmsError::Certificate("as_str returned None in certificates_by_uid".to_string())
KmsError::Certificate("as_str returned None in certificates_by_uid".to_owned())
})?;
let result = certificate_by_uid(unique_identifier, kms, user, params).await?;
results.push(result);

View file

@ -57,7 +57,7 @@ pub(crate) async fn unwrap_key(
ObjectType::PublicKey | ObjectType::Certificate => {
let attributes = match object_type {
ObjectType::PublicKey | ObjectType::Certificate => unwrapping_key.attributes,
_ => unreachable!("unwrap_key: unsupported object type: {object_type}"),
_ => kms_bail!("unwrap_key: unsupported object type: {object_type}"),
};
let private_key_uid =
attributes

View file

@ -6,6 +6,7 @@ use std::{
};
use async_trait::async_trait;
use clap::crate_version;
use cosmian_kmip::{
crypto::{secret::Secret, symmetric::AES_256_GCM_KEY_LENGTH},
kmip::{
@ -16,9 +17,9 @@ use cosmian_kmip::{
use cosmian_kms_client::access::{IsWrapped, ObjectOperationType};
use sqlx::{
sqlite::{SqliteConnectOptions, SqlitePoolOptions},
ConnectOptions, Pool, Sqlite,
ConnectOptions, Pool, Row, Sqlite,
};
use tracing::trace;
use tracing::{debug, trace};
use super::{
cached_sqlite_struct::KMSSqliteCache,
@ -33,10 +34,11 @@ use crate::{
core::extra_database_params::ExtraDatabaseParams,
database::{
database_trait::AtomicOperation,
sqlite::{atomic_, retrieve_tags_},
Database, SQLITE_QUERIES,
migrate::do_migration,
sqlite::{atomic_, is_migration_in_progress_, migrate_, retrieve_tags_},
Database, KMS_VERSION_BEFORE_MIGRATION_SUPPORT, SQLITE_QUERIES,
},
kms_bail, kms_error,
get_sqlite_query, kms_bail, kms_error,
result::{KResult, KResultHelper},
};
@ -88,29 +90,21 @@ impl CachedSqlCipher {
}
async fn create_tables(pool: &Pool<Sqlite>) -> KResult<()> {
sqlx::query(
SQLITE_QUERIES
.get("create-table-objects")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.execute(pool)
.await?;
sqlx::query(get_sqlite_query!("create-table-context"))
.execute(pool)
.await?;
sqlx::query(
SQLITE_QUERIES
.get("create-table-read_access")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.execute(pool)
.await?;
sqlx::query(get_sqlite_query!("create-table-objects"))
.execute(pool)
.await?;
sqlx::query(
SQLITE_QUERIES
.get("create-table-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.execute(pool)
.await?;
sqlx::query(get_sqlite_query!("create-table-read_access"))
.execute(pool)
.await?;
sqlx::query(get_sqlite_query!("create-table-tags"))
.execute(pool)
.await?;
Ok(())
}
@ -124,11 +118,11 @@ impl CachedSqlCipher {
group_id: u128,
key: &Secret<AES_256_GCM_KEY_LENGTH>,
) -> KResult<Arc<Pool<Sqlite>>> {
if !self.cache.exists(group_id) {
if !self.cache.exists(group_id)? {
let pool = self.instantiate_group_database(group_id, key).await?;
Self::create_tables(&pool).await?;
self.cache.save(group_id, key, pool).await?;
} else if !self.cache.opened(group_id) {
} else if !self.cache.opened(group_id)? {
let pool = self.instantiate_group_database(group_id, key).await?;
self.cache.save(group_id, key, pool).await?;
}
@ -143,6 +137,50 @@ impl Database for CachedSqlCipher {
Some(self.path.join(format!("{group_id}.sqlite")))
}
async fn migrate(&self, params: Option<&ExtraDatabaseParams>) -> KResult<()> {
if let Some(params) = params {
let pool = self.pre_query(params.group_id, &params.key).await?;
trace!("Migrate database");
// Get the context rows
match sqlx::query(get_sqlite_query!("select-context"))
.fetch_optional(&*pool)
.await?
{
None => {
trace!("No context row found, migrating from scratch");
return migrate_(
&pool,
KMS_VERSION_BEFORE_MIGRATION_SUPPORT,
"insert-context",
)
.await;
}
Some(context_row) => {
let last_kms_version_run = context_row.get::<String, _>(0);
let state = context_row.get::<String, _>(1);
trace!(
"Context row found, migrating from version {last_kms_version_run} (state: \
{state})"
);
let current_kms_version = crate_version!();
debug!(
"[state={state}] Last KMS version run: {last_kms_version_run}, Current \
KMS version: {current_kms_version}"
);
if do_migration(&last_kms_version_run, current_kms_version, &state)? {
return migrate_(&pool, current_kms_version, "update-context").await;
}
}
}
return Ok(());
}
kms_bail!("Missing group_id/key for opening SQLCipher")
}
async fn create(
&self,
uid: Option<String>,
@ -154,6 +192,10 @@ impl Database for CachedSqlCipher {
) -> KResult<String> {
if let Some(params) = params {
let pool = self.pre_query(params.group_id, &params.key).await?;
if is_migration_in_progress_(&*pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = pool.begin().await?;
match create_(uid, owner, object, attributes, tags, &mut tx).await {
Ok(uid) => {
@ -174,14 +216,14 @@ impl Database for CachedSqlCipher {
async fn retrieve(
&self,
uid: &str,
uid_or_tags: &str,
user: &str,
operation_type: ObjectOperationType,
query_access_grant: ObjectOperationType,
params: Option<&ExtraDatabaseParams>,
) -> KResult<HashMap<String, ObjectWithMetadata>> {
if let Some(params) = params {
let pool = self.pre_query(params.group_id, &params.key).await?;
let ret = retrieve_(uid, user, operation_type, &*pool).await;
let ret = retrieve_(uid_or_tags, user, query_access_grant, &*pool).await;
self.post_query(params.group_id)?;
return ret
}
@ -214,6 +256,10 @@ impl Database for CachedSqlCipher {
) -> KResult<()> {
if let Some(params) = params {
let pool = self.pre_query(params.group_id, &params.key).await?;
if is_migration_in_progress_(&*pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = pool.begin().await?;
match update_object_(uid, object, attributes, tags, &mut tx).await {
Ok(()) => {
@ -240,6 +286,9 @@ impl Database for CachedSqlCipher {
) -> KResult<()> {
if let Some(params) = params {
let pool = self.pre_query(params.group_id, &params.key).await?;
if is_migration_in_progress_(&*pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = pool.begin().await?;
match update_state_(uid, state, &mut tx).await {
Ok(()) => {
@ -270,6 +319,9 @@ impl Database for CachedSqlCipher {
) -> KResult<()> {
if let Some(params) = params {
let pool = self.pre_query(params.group_id, &params.key).await?;
if is_migration_in_progress_(&*pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = pool.begin().await?;
match upsert_(uid, user, object, attributes, tags, state, &mut tx).await {
Ok(()) => {
@ -291,13 +343,16 @@ impl Database for CachedSqlCipher {
async fn delete(
&self,
uid: &str,
owner: &str,
user: &str,
params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if let Some(params) = params {
let pool = self.pre_query(params.group_id, &params.key).await?;
if is_migration_in_progress_(&*pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = pool.begin().await?;
match delete_(uid, owner, &mut tx).await {
match delete_(uid, user, &mut tx).await {
Ok(()) => {
tx.commit().await?;
self.post_query(params.group_id)?;
@ -316,12 +371,12 @@ impl Database for CachedSqlCipher {
async fn list_user_granted_access_rights(
&self,
owner: &str,
user: &str,
params: Option<&ExtraDatabaseParams>,
) -> KResult<HashMap<String, (String, StateEnumeration, HashSet<ObjectOperationType>)>> {
if let Some(params) = params {
let pool = self.pre_query(params.group_id, &params.key).await?;
let ret = list_user_granted_access_rights_(owner, &*pool).await;
let ret = list_user_granted_access_rights_(user, &*pool).await;
self.post_query(params.group_id)?;
return ret
}
@ -347,13 +402,16 @@ impl Database for CachedSqlCipher {
async fn grant_access(
&self,
uid: &str,
userid: &str,
user: &str,
operation_types: HashSet<ObjectOperationType>,
params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if let Some(params) = params {
let pool = self.pre_query(params.group_id, &params.key).await?;
let ret = insert_access_(uid, userid, operation_types, &*pool).await;
if is_migration_in_progress_(&*pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let ret = insert_access_(uid, user, operation_types, &*pool).await;
self.post_query(params.group_id)?;
return ret
}
@ -364,13 +422,16 @@ impl Database for CachedSqlCipher {
async fn remove_access(
&self,
uid: &str,
userid: &str,
user: &str,
operation_types: HashSet<ObjectOperationType>,
params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if let Some(params) = params {
let pool = self.pre_query(params.group_id, &params.key).await?;
let ret = remove_access_(uid, userid, operation_types, &*pool).await;
if is_migration_in_progress_(&*pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let ret = remove_access_(uid, user, operation_types, &*pool).await;
self.post_query(params.group_id)?;
return ret
}
@ -381,12 +442,12 @@ impl Database for CachedSqlCipher {
async fn is_object_owned_by(
&self,
uid: &str,
userid: &str,
owner: &str,
params: Option<&ExtraDatabaseParams>,
) -> KResult<bool> {
if let Some(params) = params {
let pool = self.pre_query(params.group_id, &params.key).await?;
let ret = is_object_owned_by_(uid, userid, &*pool).await;
let ret = is_object_owned_by_(uid, owner, &*pool).await;
self.post_query(params.group_id)?;
return ret
}
@ -424,7 +485,7 @@ impl Database for CachedSqlCipher {
async fn list_user_access_rights_on_object(
&self,
uid: &str,
userid: &str,
user: &str,
no_inherited_access: bool,
params: Option<&ExtraDatabaseParams>,
) -> KResult<HashSet<ObjectOperationType>> {
@ -433,7 +494,7 @@ impl Database for CachedSqlCipher {
if let Some(params) = params {
let pool = self.pre_query(params.group_id, &params.key).await?;
let ret =
list_user_access_rights_on_object_(uid, userid, no_inherited_access, &*pool).await;
list_user_access_rights_on_object_(uid, user, no_inherited_access, &*pool).await;
self.post_query(params.group_id)?;
return ret
}
@ -443,14 +504,17 @@ impl Database for CachedSqlCipher {
async fn atomic(
&self,
owner: &str,
user: &str,
operations: &[AtomicOperation],
params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if let Some(params) = params {
let pool = self.pre_query(params.group_id, &params.key).await?;
if is_migration_in_progress_(&*pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = pool.begin().await?;
return match atomic_(owner, operations, &mut tx).await {
return match atomic_(user, operations, &mut tx).await {
Ok(()) => {
tx.commit().await?;
self.post_query(params.group_id)?;
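Several write paths in this file (create, update, upsert, delete, grant/remove access, atomic) now bail out with "Migration in progress. Please retry later" when `is_migration_in_progress_` reports an in-flight upgrade. That helper lives in `database/sqlite.rs` and its body is not part of this diff; the sketch below is purely illustrative of one plausible shape. The `select-context` query name and the `(last_version_run, state)` column order are taken from the `migrate` implementation above, while the idea that any state other than `"ready"` marks a running upgrade is an assumption.

```rust
// Illustrative only -- not the actual helper from database/sqlite.rs.
// Assumes the context row is (last_version_run, state) and that a state
// other than "ready" means an upgrade is currently rewriting objects.
pub(crate) async fn is_migration_in_progress_(pool: &Pool<Sqlite>) -> KResult<bool> {
    match sqlx::query(get_sqlite_query!("select-context"))
        .fetch_optional(pool)
        .await?
    {
        Some(row) => Ok(row.get::<String, _>(1) != "ready"),
        None => Ok(false), // no context row yet: nothing is migrating
    }
}
```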

View file

@ -13,7 +13,7 @@ use cosmian_kmip::crypto::{secret::Secret, symmetric::AES_256_GCM_KEY_LENGTH};
use sqlx::{Pool, Sqlite};
use tracing::info;
use crate::{kms_bail, kms_error, result::KResult};
use crate::{error::KmsError, kms_bail, kms_error, result::KResult};
macro_rules! mac {
($res: expr, $key:expr, $($bytes: expr),+) => {
@ -64,26 +64,31 @@ impl fmt::Debug for KMSSqliteCacheItem {
}
/// Give the time since EPOCH in secs
pub(crate) fn _now() -> u64 {
SystemTime::now()
pub(crate) fn _now() -> KResult<u64> {
Ok(SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.expect("Unable to get duration since epoch")
.as_secs()
.map_err(|e| {
KmsError::DatabaseError(format!("Unable to get duration since epoch. Error: {e:?}"))
})?
.as_secs())
}
impl KMSSqliteCacheItem {
#[must_use]
pub(crate) fn new(sqlite: Pool<Sqlite>, mac: Vec<u8>, freeable_cache_index: usize) -> Self {
Self {
pub(crate) fn new(
sqlite: Pool<Sqlite>,
mac: Vec<u8>,
freeable_cache_index: usize,
) -> KResult<Self> {
Ok(Self {
sqlite: Arc::new(sqlite),
mac,
inserted_at: _now(),
inserted_at: _now()?,
in_used: 0,
last_used_at: 0,
closed: false,
closed_at: 0,
freeable_cache_index,
}
})
}
}
@ -122,21 +127,24 @@ impl KMSSqliteCache {
}
/// Test if a sqlite connection is opened for a given id
pub(crate) fn opened(&self, id: u128) -> bool {
let sqlites = self.sqlites.read().expect("Unable to lock for read");
pub(crate) fn opened(&self, id: u128) -> KResult<bool> {
let sqlites = self.sqlites.read().map_err(|e| {
KmsError::DatabaseError(format!("Unable to lock for read. Error: {e:?}"))
})?;
if !sqlites.contains_key(&id) {
return false
return Ok(false);
}
!sqlites[&id].closed
Ok(!sqlites[&id].closed)
}
/// Test if a sqlite connection exist in the cache
pub(crate) fn exists(&self, id: u128) -> bool {
self.sqlites
pub(crate) fn exists(&self, id: u128) -> KResult<bool> {
Ok(self
.sqlites
.read()
.expect("Unable to lock for read")
.contains_key(&id)
.map_err(|e| KmsError::DatabaseError(format!("Unable to lock for read. Error: {e:?}")))?
.contains_key(&id))
}
/// Get the sqlite handler and tag it as "used"
@ -147,7 +155,9 @@ impl KMSSqliteCache {
id: u128,
key: &Secret<AES_256_GCM_KEY_LENGTH>,
) -> KResult<Arc<Pool<Sqlite>>> {
let mut sqlites = self.sqlites.write().expect("Unable to lock for write");
let mut sqlites = self.sqlites.write().map_err(|e| {
KmsError::DatabaseError(format!("Unable to lock for write. Error: {e:?}"))
})?;
let item = sqlites
.get_mut(&id)
@ -160,7 +170,7 @@ impl KMSSqliteCache {
// We need to check if the key provided by the user is the same that was used to open the database
// If we do not, we can just send any password: the database is already opened anyway.
// Do this by checking the macs
let mut mac = vec![0u8; 32];
let mut mac = vec![0_u8; 32];
mac!(mac.as_mut_slice(), key, id.to_be_bytes().as_slice());
if mac != item.mac {
kms_bail!("Database secret is wrong");
@ -170,12 +180,14 @@ impl KMSSqliteCache {
if item.in_used == 0 {
self.freeable_sqlites
.write()
.expect("Unable to lock for write")
.map_err(|e| {
KmsError::DatabaseError(format!("Unable to lock for write. Error: {e:?}"))
})?
.uncache(item.freeable_cache_index)?;
}
item.in_used += 1;
item.last_used_at = _now();
item.last_used_at = _now()?;
Ok(Arc::clone(&item.sqlite))
}
@ -185,7 +197,9 @@ impl KMSSqliteCache {
///
/// The function will return an error if the database is not in the cache or already released
pub(crate) fn release(&self, id: u128) -> KResult<()> {
let mut sqlites = self.sqlites.write().expect("Unable to lock for write");
let mut sqlites = self.sqlites.write().map_err(|e| {
KmsError::DatabaseError(format!("Unable to lock for write. Error: {e:?}"))
})?;
let item = sqlites
.get_mut(&id)
@ -201,7 +215,9 @@ impl KMSSqliteCache {
if item.in_used == 0 {
self.freeable_sqlites
.write()
.expect("Unable to lock for write")
.map_err(|e| {
KmsError::DatabaseError(format!("Unable to lock for write. Error: {e:?}"))
})?
.recache(item.freeable_cache_index)?;
}
@ -219,20 +235,24 @@ impl KMSSqliteCache {
let id = self
.freeable_sqlites
.write()
.expect("Unable to lock for write")
.map_err(|e| {
KmsError::DatabaseError(format!("Unable to lock for write. Error: {e:?}"))
})?
.pop();
let Ok(id) = id else { break }; // nothing in the cache, just leave
let sq = {
let mut sqlites = self.sqlites.write().expect("Unable to lock for write");
let mut sqlites = self.sqlites.write().map_err(|e| {
KmsError::DatabaseError(format!("Unable to lock for write. Error: {e:?}"))
})?;
let item = sqlites
.get_mut(&id)
.ok_or_else(|| kms_error!("Key is not in the cache"))?;
item.closed = true;
item.closed_at = _now();
item.closed_at = _now()?;
info!("CachedSQLCipher: freeing = {item:?}");
@ -266,12 +286,13 @@ impl KMSSqliteCache {
self.flush().await?;
// If nothing has been flush, allow to exceed max cache size
let mut sqlites = self.sqlites.write().expect("Unable to lock for write");
let mut sqlites = self.sqlites.write().map_err(|e| {
KmsError::DatabaseError(format!("Unable to lock for write. Error: {e:?}"))
})?;
let mut freeable_sqlites = self
.freeable_sqlites
.write()
.expect("Unable to lock for write");
let mut freeable_sqlites = self.freeable_sqlites.write().map_err(|e| {
KmsError::DatabaseError(format!("Unable to lock for write. Error: {e:?}"))
})?;
let item = sqlites.get_mut(&id);
if let Some(item) = item {
@ -285,7 +306,7 @@ impl KMSSqliteCache {
item.sqlite = Arc::new(pool);
item.closed = false;
item.in_used = 1;
item.last_used_at = _now();
item.last_used_at = _now()?;
} else {
info!("CachedSQLCipher: new group_id={id}");
@ -294,15 +315,15 @@ impl KMSSqliteCache {
// Add it to the SqliteCache
// compute the mac
let mut mac = vec![0u8; 32];
let mut mac = vec![0_u8; 32];
mac!(mac.as_mut_slice(), key, id.to_be_bytes().as_slice());
let mut item = KMSSqliteCacheItem::new(pool, mac, freeable_cache_id);
let mut item = KMSSqliteCacheItem::new(pool, mac, freeable_cache_id)?;
freeable_sqlites.uncache(freeable_cache_id)?;
// Make it usable (to avoid direct free after alloc in case of cache overflow)
item.in_used = 1;
item.last_used_at = _now();
item.last_used_at = _now()?;
sqlites.insert(id, item);
};
@ -484,6 +505,7 @@ impl FreeableSqliteCache {
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used, clippy::expect_used)]
use std::{str::FromStr, sync::atomic::Ordering, time::Duration};
use cosmian_kmip::crypto::{secret::Secret, symmetric::AES_256_GCM_KEY_LENGTH};
@ -566,7 +588,7 @@ mod tests {
assert_eq!(fsc.length, 0);
assert_eq!(fsc.size, 4);
assert!(fsc.pop().is_err());
fsc.pop().unwrap_err();
assert_eq!(fsc.push(5), 4);
@ -591,7 +613,7 @@ mod tests {
assert!(fsc.uncache(4).is_err());
assert!(fsc.uncache(2).is_ok());
fsc.uncache(2).unwrap();
assert_eq!(fsc.head, 0);
assert_eq!(fsc.tail, 3);
@ -604,7 +626,7 @@ mod tests {
assert_eq!(fsc.entries[3].next, FSCNeighborEntry::Nil);
assert_eq!(fsc.entries[3].prev, FSCNeighborEntry::Chained(1));
assert!(fsc.uncache(0).is_ok());
fsc.uncache(0).unwrap();
assert_eq!(fsc.head, 1);
assert_eq!(fsc.tail, 3);
@ -615,7 +637,7 @@ mod tests {
assert_eq!(fsc.entries[1].next, FSCNeighborEntry::Chained(3));
assert_eq!(fsc.entries[1].prev, FSCNeighborEntry::Nil);
assert!(fsc.uncache(3).is_ok());
fsc.uncache(3).unwrap();
assert_eq!(fsc.head, 1);
assert_eq!(fsc.tail, 1);
@ -626,7 +648,7 @@ mod tests {
assert_eq!(fsc.entries[1].next, FSCNeighborEntry::Nil);
assert_eq!(fsc.entries[1].prev, FSCNeighborEntry::Nil);
assert!(fsc.uncache(1).is_ok());
fsc.uncache(1).unwrap();
assert_eq!(fsc.length, 0);
assert_eq!(fsc.size, 4);
@ -634,7 +656,7 @@ mod tests {
assert!(!fsc.entries[1].chained);
assert!(fsc.uncache(1).is_err());
assert!(fsc.pop().is_err());
fsc.pop().unwrap_err();
assert_eq!(fsc.push(5), 4);
assert_eq!(fsc.head, 4);
@ -658,7 +680,7 @@ mod tests {
assert!(fsc.recache(4).is_err());
assert!(fsc.recache(3).is_err());
assert!(fsc.uncache(2).is_ok());
fsc.uncache(2).unwrap();
assert_eq!(fsc.head, 0);
assert_eq!(fsc.tail, 3);
@ -671,7 +693,7 @@ mod tests {
assert_eq!(fsc.entries[3].next, FSCNeighborEntry::Nil);
assert_eq!(fsc.entries[3].prev, FSCNeighborEntry::Chained(1));
assert!(fsc.recache(2).is_ok());
fsc.recache(2).unwrap();
assert!(fsc.recache(2).is_err());
assert_eq!(fsc.head, 0);
@ -685,11 +707,11 @@ mod tests {
assert_eq!(fsc.entries[3].next, FSCNeighborEntry::Chained(2));
assert_eq!(fsc.entries[3].prev, FSCNeighborEntry::Chained(1));
assert!(fsc.uncache(0).is_ok());
assert!(fsc.uncache(1).is_ok());
assert!(fsc.uncache(2).is_ok());
assert!(fsc.uncache(3).is_ok());
assert!(fsc.recache(3).is_ok());
fsc.uncache(0).unwrap();
fsc.uncache(1).unwrap();
fsc.uncache(2).unwrap();
fsc.uncache(3).unwrap();
fsc.recache(3).unwrap();
assert_eq!(fsc.head, 3);
assert_eq!(fsc.tail, 3);
@ -714,66 +736,64 @@ mod tests {
let sqlite2 = connect().await.expect("Can't create database");
let sqlite3 = connect().await.expect("Can't create database");
assert!(cache.save(1, &password, sqlite).await.is_ok());
cache.save(1, &password, sqlite).await.unwrap();
assert_eq!(cache.current_size.load(Ordering::Relaxed), 1);
assert!(cache.save(2, &password, sqlite2).await.is_ok());
cache.save(2, &password, sqlite2).await.unwrap();
assert_eq!(cache.current_size.load(Ordering::Relaxed), 2); // flush should do nothing here
assert!(cache.opened(1));
assert!(cache.opened(2));
assert!(cache.opened(1).unwrap());
assert!(cache.opened(2).unwrap());
assert!(cache.exists(1));
assert!(cache.exists(1).unwrap());
let sqlite2 = connect().await.expect("Can't create database");
assert!(cache.save(2, &password, sqlite2).await.is_ok()); // double saved = ok
cache.save(2, &password, sqlite2).await.unwrap(); // double saved = ok
assert!(cache.release(2).is_ok());
cache.release(2).unwrap();
assert!(cache.release(2).is_err()); // not twice
assert!(cache.exists(2));
assert!(cache.opened(2)); // still opened
assert!(cache.exists(2).unwrap());
assert!(cache.opened(2).unwrap()); // still opened
assert!(!cache.exists(3));
assert!(cache.save(3, &password, sqlite3).await.is_ok());
assert!(!cache.exists(3).unwrap());
cache.save(3, &password, sqlite3).await.unwrap();
assert_eq!(cache.current_size.load(Ordering::Relaxed), 2); // flush should do nothing here
assert!(cache.opened(3)); // still opened
assert!(!cache.opened(2)); // not opened anymore
assert!(cache.exists(2));
assert!(cache.opened(3).unwrap()); // still opened
assert!(!cache.opened(2).unwrap()); // not opened anymore
assert!(cache.exists(2).unwrap());
let sqlite2 = connect().await.expect("Can't create database");
assert!(cache.save(2, &password, sqlite2).await.is_ok());
cache.save(2, &password, sqlite2).await.unwrap();
assert_eq!(cache.current_size.load(Ordering::Relaxed), 3); // flush should do nothing here
assert!(cache.opened(2));
assert!(cache.opened(2).unwrap());
assert!(cache.get(4, &password).is_err());
assert!(
cache
.get(1, &Secret::<AES_256_GCM_KEY_LENGTH>::new_random().unwrap())
.is_err()
); // bad &password
assert!(cache.get(1, &password).is_ok()); // 2 uses of sqlite1
cache.get(4, &password).unwrap_err();
cache
.get(1, &Secret::<AES_256_GCM_KEY_LENGTH>::new_random().unwrap())
.unwrap_err(); // bad &password
cache.get(1, &password).unwrap(); // 2 uses of sqlite1
let sqlite4 = connect().await.expect("Can't create database");
assert!(cache.save(4, &password, sqlite4).await.is_ok());
cache.save(4, &password, sqlite4).await.unwrap();
assert_eq!(cache.current_size.load(Ordering::Relaxed), 4); // flush should do nothing here
assert!(cache.opened(1));
assert!(cache.opened(1).unwrap());
assert!(cache.release(1).is_ok()); // 1 uses of sqlite1
cache.release(1).unwrap(); // 1 uses of sqlite1
let sqlite5 = connect().await.expect("Can't create database");
assert!(cache.save(5, &password, sqlite5).await.is_ok());
cache.save(5, &password, sqlite5).await.unwrap();
assert_eq!(cache.current_size.load(Ordering::Relaxed), 5); // flush should do nothing here
assert!(cache.opened(1));
assert!(cache.opened(1).unwrap());
assert!(cache.release(1).is_ok()); // 0 uses of sqlite1
assert!(cache.opened(1));
cache.release(1).unwrap(); // 0 uses of sqlite1
assert!(cache.opened(1).unwrap());
let sqlite6 = connect().await.expect("Can't create database");
assert!(cache.save(6, &password, sqlite6).await.is_ok());
cache.save(6, &password, sqlite6).await.unwrap();
assert_eq!(cache.current_size.load(Ordering::Relaxed), 5); // flush should do something here
assert!(!cache.opened(1));
assert!(cache.exists(1));
assert!(!cache.opened(1).unwrap());
assert!(cache.exists(1).unwrap());
assert!(cache.get(1, &password).is_err()); // get after close
cache.get(1, &password).unwrap_err(); // get after close
}
async fn connect() -> std::result::Result<sqlx::Pool<sqlx::Sqlite>, sqlx::Error> {

View file

@ -18,6 +18,9 @@ pub(crate) trait Database {
/// Return the filename of the database or `None` if not supported
fn filename(&self, group_id: u128) -> Option<PathBuf>;
/// Migrate the database to the latest version
async fn migrate(&self, params: Option<&ExtraDatabaseParams>) -> KResult<()>;
/// Insert the given Object in the database.
///
/// A new UUID will be created if none is supplied.

View file

@ -60,7 +60,7 @@ pub(crate) trait PlaceholderTrait {
/// Get node specifier depending on `object_type` (ie: `PrivateKey` or `Certificate`)
#[must_use]
fn extract_text_from_object_type_path() -> String {
"object ->> 'object_type'".to_string()
"object ->> 'object_type'".to_owned()
}
}
@ -73,7 +73,7 @@ impl PlaceholderTrait for MySqlPlaceholder {
const TYPE_INTEGER: &'static str = "SIGNED";
fn binder(_param_number: usize) -> String {
"?".to_string()
"?".to_owned()
}
fn additional_rq_from() -> Option<String> {

View file

@ -0,0 +1,24 @@
use tracing::trace;
use version_compare::{compare, Cmp};
use crate::{error::KmsError, result::KResult};
pub(crate) fn do_migration(
last_kms_version_run: &str,
current_kms_version: &str,
state: &str,
) -> KResult<bool> {
if let Ok(cmp) = compare(last_kms_version_run, current_kms_version) {
match (cmp, state) {
(Cmp::Eq | Cmp::Ge | Cmp::Gt, "ready") => {
trace!("No migration needed");
Ok(false)
}
(Cmp::Eq | Cmp::Ge | Cmp::Gt | Cmp::Ne | Cmp::Lt | Cmp::Le, _) => Ok(true),
}
} else {
Err(KmsError::DatabaseError(
"Error comparing versions".to_owned(),
))
}
}
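As a reading aid, here is a hypothetical test sketch (not part of the commit) spelling out the decisions `do_migration` is expected to make for typical `(last_kms_version_run, current_kms_version, state)` triples. The `"upgrading"` label and the test name are assumptions; only `"ready"` appears in the match arms above, and the outcomes below follow directly from them.

```rust
// Hypothetical examples of the decision table implemented above.
#[test]
fn migration_decision_examples() -> KResult<()> {
    // Same version, previous run finished cleanly: nothing to do.
    assert!(!do_migration("4.18.0", "4.18.0", "ready")?);
    // Database last written by an older KMS version: migrate.
    assert!(do_migration("4.12.0", "4.18.0", "ready")?);
    // Same version, but a previous migration was interrupted: run it again.
    assert!(do_migration("4.18.0", "4.18.0", "upgrading")?);
    Ok(())
}
```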

View file

@ -1,3 +1,38 @@
/// This module contains the database implementation for the KMS server.
/// It provides functionality for interacting with different types of databases,
/// such as `SQLite`, `MySQL`, `PostgreSQL`, and Redis.
///
/// The module includes the following sub-modules:
/// - `cached_sqlcipher`: Contains the implementation for caching SQL queries using `SQLCipher`.
/// - `cached_sqlite_struct`: Contains the implementation for caching `SQLite` structures.
/// - `database_trait`: Contains the trait definition for a generic database.
/// - `mysql`: Contains the implementation for `MySQL` database.
/// - `object_with_metadata`: Contains the implementation for objects with metadata.
/// - `pgsql`: Contains the implementation for `PostgreSQL` database.
/// - `redis`: Contains the implementation for Redis database.
/// - `sqlite`: Contains the implementation for `SQLite` database.
/// - `locate_query`: Contains utility functions for locating queries.
/// - `migrate`: Contains functions for database migration.
/// - `retrieve_object_utils`: Contains utility functions for retrieving objects.
///
/// The module also defines the following types and constants:
/// - `KMSServer`: A type alias for the KMS server.
/// - `DBObject`: A struct representing a database object.
/// - `KMS_VERSION_BEFORE_MIGRATION_SUPPORT`: A constant representing the KMS version before migration support.
/// - `PGSQL_FILE_QUERIES`: A constant representing the `PostgreSQL` file queries.
/// - `MYSQL_FILE_QUERIES`: A constant representing the `MySQL` file queries.
/// - `SQLITE_FILE_QUERIES`: A constant representing the `SQLite` file queries.
///
/// The module also includes the following functions:
/// - `state_from_string`: Converts a string to a `StateEnumeration` value.
///
/// Finally, the module includes a test module for unit testing.
///
/// # Errors
///
/// This module does not define any specific errors. However, it may return errors
/// from the underlying database operations or from the functions defined in the sub-modules.
/// The specific error types and conditions are documented in the respective functions.
use cosmian_kmip::kmip::{
kmip_objects::{Object, ObjectType},
kmip_types::StateEnumeration,
@ -20,15 +55,14 @@ pub(crate) mod redis;
pub(crate) mod sqlite;
pub(crate) use database_trait::{AtomicOperation, Database};
mod locate_query;
mod migrate;
mod retrieve_object_utils;
pub(crate) use locate_query::{
query_from_attributes, MySqlPlaceholder, PgSqlPlaceholder, SqlitePlaceholder,
};
pub(crate) use retrieve_object_utils::retrieve_object_for_operation;
#[cfg(test)]
mod tests;
const KMS_VERSION_BEFORE_MIGRATION_SUPPORT: &str = "4.12.0";
const PGSQL_FILE_QUERIES: &str = include_str!("query.sql");
const MYSQL_FILE_QUERIES: &str = include_str!("query_mysql.sql");
const SQLITE_FILE_QUERIES: &str = include_str!("query.sql");
@ -55,6 +89,11 @@ pub(crate) struct DBObject {
pub(crate) object: Object,
}
/// Converts a string to a `StateEnumeration` value.
///
/// # Errors
///
/// Returns an error if the input string does not match any valid `StateEnumeration` value.
pub fn state_from_string(s: &str) -> KResult<StateEnumeration> {
match s {
"PreActive" => Ok(StateEnumeration::PreActive),
@ -66,3 +105,6 @@ pub fn state_from_string(s: &str) -> KResult<StateEnumeration> {
x => kms_bail!("invalid state in db: {}", x),
}
}
#[cfg(test)]
mod tests;

View file

@ -5,6 +5,7 @@ use std::{
};
use async_trait::async_trait;
use clap::crate_version;
use cosmian_kmip::kmip::{
kmip_objects::Object,
kmip_operations::ErrorReason,
@ -25,11 +26,28 @@ use super::{
};
use crate::{
core::extra_database_params::ExtraDatabaseParams,
database::database_trait::AtomicOperation,
database::{
database_trait::AtomicOperation, migrate::do_migration,
KMS_VERSION_BEFORE_MIGRATION_SUPPORT,
},
kms_bail, kms_error,
result::{KResult, KResultHelper},
};
#[macro_export]
macro_rules! get_mysql_query {
($name:literal) => {
MYSQL_QUERIES
.get($name)
.ok_or_else(|| kms_error!("{} SQL query can't be found", $name))?
};
($name:expr) => {
MYSQL_QUERIES
.get($name)
.ok_or_else(|| kms_error!("{} SQL query can't be found", $name))?
};
}
/// The `MySQL` connector can also connect to a `MariaDB` database;
/// see: <https://mariadb.com/kb/en/mariadb-vs-mysql-compatibility>/
pub(crate) struct MySqlPool {
@ -47,35 +65,29 @@ impl MySqlPool {
.connect_with(options)
.await?;
sqlx::query(
MYSQL_QUERIES
.get("create-table-objects")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.execute(&pool)
.await?;
sqlx::query(get_mysql_query!("create-table-context"))
.execute(&pool)
.await?;
sqlx::query(
MYSQL_QUERIES
.get("create-table-read_access")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.execute(&pool)
.await?;
sqlx::query(get_mysql_query!("create-table-objects"))
.execute(&pool)
.await?;
sqlx::query(
MYSQL_QUERIES
.get("create-table-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.execute(&pool)
.await?;
sqlx::query(get_mysql_query!("create-table-read_access"))
.execute(&pool)
.await?;
sqlx::query(get_mysql_query!("create-table-tags"))
.execute(&pool)
.await?;
if clear_database {
clear_database_(&pool).await?;
}
Ok(Self { pool })
let mysql_pool = Self { pool };
mysql_pool.migrate(None).await?;
Ok(mysql_pool)
}
}
@ -85,17 +97,59 @@ impl Database for MySqlPool {
None
}
async fn migrate(&self, _params: Option<&ExtraDatabaseParams>) -> KResult<()> {
trace!("Migrate database");
// Get the context rows
match sqlx::query(get_mysql_query!("select-context"))
.fetch_optional(&self.pool)
.await?
{
None => {
trace!("No context row found, migrating from scratch");
return migrate_(
&self.pool,
KMS_VERSION_BEFORE_MIGRATION_SUPPORT,
"insert-context",
)
.await;
}
Some(context_row) => {
let last_kms_version_run = context_row.get::<String, _>(0);
let state = context_row.get::<String, _>(1);
trace!(
"Context row found, migrating from version {last_kms_version_run} (state: \
{state})"
);
let current_kms_version = crate_version!();
debug!(
"[state={state}] Last KMS version run: {last_kms_version_run}, Current KMS \
version: {current_kms_version}"
);
if do_migration(&last_kms_version_run, current_kms_version, &state)? {
return migrate_(&self.pool, current_kms_version, "update-context").await;
}
}
}
Ok(())
}
async fn create(
&self,
uid: Option<String>,
user: &str,
owner: &str,
object: &Object,
attributes: &Attributes,
tags: &HashSet<String>,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<String> {
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = self.pool.begin().await?;
let uid = match create_(uid, user, object, attributes, tags, &mut tx).await {
let uid = match create_(uid, owner, object, attributes, tags, &mut tx).await {
Ok(uid) => uid,
Err(e) => {
tx.rollback().await.context("transaction failed")?;
@ -110,10 +164,10 @@ impl Database for MySqlPool {
&self,
uid_or_tags: &str,
user: &str,
operation_type: ObjectOperationType,
query_access_grant: ObjectOperationType,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<HashMap<String, ObjectWithMetadata>> {
retrieve_(uid_or_tags, user, operation_type, &self.pool).await
retrieve_(uid_or_tags, user, query_access_grant, &self.pool).await
}
async fn retrieve_tags(
@ -151,6 +205,9 @@ impl Database for MySqlPool {
state: StateEnumeration,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = self.pool.begin().await?;
match update_state_(uid, state, &mut tx).await {
Ok(()) => {
@ -174,6 +231,10 @@ impl Database for MySqlPool {
state: StateEnumeration,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = self.pool.begin().await?;
match upsert_(uid, user, object, attributes, tags, state, &mut tx).await {
Ok(()) => {
@ -193,6 +254,10 @@ impl Database for MySqlPool {
user: &str,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = self.pool.begin().await?;
match delete_(uid, user, &mut tx).await {
Ok(()) => {
@ -225,30 +290,38 @@ impl Database for MySqlPool {
async fn grant_access(
&self,
uid: &str,
userid: &str,
user: &str,
operation_types: HashSet<ObjectOperationType>,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
insert_access_(uid, userid, operation_types, &self.pool).await
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
insert_access_(uid, user, operation_types, &self.pool).await
}
async fn remove_access(
&self,
uid: &str,
userid: &str,
user: &str,
operation_types: HashSet<ObjectOperationType>,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
remove_access_(uid, userid, operation_types, &self.pool).await
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
remove_access_(uid, user, operation_types, &self.pool).await
}
async fn is_object_owned_by(
&self,
uid: &str,
userid: &str,
owner: &str,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<bool> {
is_object_owned_by_(uid, userid, &self.pool).await
is_object_owned_by_(uid, owner, &self.pool).await
}
async fn find(
@ -272,21 +345,25 @@ impl Database for MySqlPool {
async fn list_user_access_rights_on_object(
&self,
uid: &str,
userid: &str,
user: &str,
no_inherited_access: bool,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<HashSet<ObjectOperationType>> {
list_user_access_rights_on_object_(uid, userid, no_inherited_access, &self.pool).await
list_user_access_rights_on_object_(uid, user, no_inherited_access, &self.pool).await
}
async fn atomic(
&self,
owner: &str,
user: &str,
operations: &[AtomicOperation],
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = self.pool.begin().await?;
match atomic_(owner, operations, &mut tx).await {
match atomic_(user, operations, &mut tx).await {
Ok(()) => {
tx.commit().await?;
Ok(())
@ -321,30 +398,22 @@ pub(crate) async fn create_(
// If the uid is not provided, generate a new one
let uid = uid.unwrap_or_else(|| Uuid::new_v4().to_string());
sqlx::query(
MYSQL_QUERIES
.get("insert-objects")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid.clone())
.bind(object_json)
.bind(attributes_json)
.bind(StateEnumeration::Active.to_string())
.bind(owner)
.execute(&mut **executor)
.await?;
sqlx::query(get_mysql_query!("insert-objects"))
.bind(uid.clone())
.bind(object_json)
.bind(attributes_json)
.bind(StateEnumeration::Active.to_string())
.bind(owner)
.execute(&mut **executor)
.await?;
// Insert the tags
for tag in tags {
sqlx::query(
MYSQL_QUERIES
.get("insert-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid.clone())
.bind(tag)
.execute(&mut **executor)
.await?;
sqlx::query(get_mysql_query!("insert-tags"))
.bind(uid.clone())
.bind(tag)
.execute(&mut **executor)
.await?;
}
trace!("Created in DB: {uid} / {owner}");
@ -371,10 +440,7 @@ where
let tags_params = tags.iter().map(|_| "?").collect::<Vec<_>>().join(", ");
// Build the raw SQL query
let raw_sql = MYSQL_QUERIES
.get("select-from-tags")
.context("SQL query can't be found")?
.replace("@TAGS", &tags_params);
let raw_sql = get_mysql_query!("select-from-tags").replace("@TAGS", &tags_params);
// Bind the tags params
let mut query = sqlx::query::<MySql>(&raw_sql);
@ -382,21 +448,17 @@ where
query = query.bind(tag);
}
// Bind the tags len and the user
query = query.bind(tags.len() as i16).bind(user);
query = query.bind(i16::try_from(tags.len())?).bind(user);
// Execute the query
query.fetch_all(executor).await?
} else {
sqlx::query(
MYSQL_QUERIES
.get("select-object")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(user)
.bind(uid_or_tags)
.fetch_optional(executor)
.await?
.map_or(vec![], |row| vec![row])
sqlx::query(get_mysql_query!("select-object"))
.bind(user)
.bind(uid_or_tags)
.fetch_optional(executor)
.await?
.map_or(vec![], |row| vec![row])
};
// process the rows and find the tags
@ -434,14 +496,10 @@ async fn retrieve_tags_<'e, E>(uid: &str, executor: E) -> KResult<HashSet<String
where
E: Executor<'e, Database = MySql> + Copy,
{
let rows: Vec<MySqlRow> = sqlx::query(
MYSQL_QUERIES
.get("select-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.fetch_all(executor)
.await?;
let rows: Vec<MySqlRow> = sqlx::query(get_mysql_query!("select-tags"))
.bind(uid)
.fetch_all(executor)
.await?;
let tags = rows.iter().map(|r| r.get(0)).collect::<HashSet<String>>();
@ -466,39 +524,27 @@ pub(crate) async fn update_object_(
.context("failed serializing the attributes to JSON")
.reason(ErrorReason::Internal_Server_Error)?;
sqlx::query(
MYSQL_QUERIES
.get("update-object-with-object")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(object_json)
.bind(attributes_json)
.bind(uid)
.execute(&mut **executor)
.await?;
// Insert the new tags if any
if let Some(tags) = tags {
// delete the existing tags
sqlx::query(
MYSQL_QUERIES
.get("delete-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
sqlx::query(get_mysql_query!("update-object-with-object"))
.bind(object_json)
.bind(attributes_json)
.bind(uid)
.execute(&mut **executor)
.await?;
for tag in tags {
sqlx::query(
MYSQL_QUERIES
.get("insert-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
// Insert the new tags if any
if let Some(tags) = tags {
// delete the existing tags
sqlx::query(get_mysql_query!("delete-tags"))
.bind(uid)
.bind(tag)
.execute(&mut **executor)
.await?;
for tag in tags {
sqlx::query(get_mysql_query!("insert-tags"))
.bind(uid)
.bind(tag)
.execute(&mut **executor)
.await?;
}
}
@ -511,15 +557,11 @@ pub(crate) async fn update_state_(
state: StateEnumeration,
executor: &mut Transaction<'_, MySql>,
) -> KResult<()> {
sqlx::query(
MYSQL_QUERIES
.get("update-object-with-state")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(state.to_string())
.bind(uid)
.execute(&mut **executor)
.await?;
sqlx::query(get_mysql_query!("update-object-with-state"))
.bind(state.to_string())
.bind(uid)
.execute(&mut **executor)
.await?;
trace!("Updated in DB: {uid}");
Ok(())
}
@ -530,25 +572,17 @@ pub(crate) async fn delete_(
executor: &mut Transaction<'_, MySql>,
) -> KResult<()> {
// delete the object
sqlx::query(
MYSQL_QUERIES
.get("delete-object")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(owner)
.execute(&mut **executor)
.await?;
sqlx::query(get_mysql_query!("delete-object"))
.bind(uid)
.bind(owner)
.execute(&mut **executor)
.await?;
// delete the tags
sqlx::query(
MYSQL_QUERIES
.get("delete-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.execute(&mut **executor)
.await?;
sqlx::query(get_mysql_query!("delete-tags"))
.bind(uid)
.execute(&mut **executor)
.await?;
trace!("Deleted in DB: {uid}");
Ok(())
@ -574,43 +608,31 @@ pub(crate) async fn upsert_(
.context("failed serializing the attributes to JSON")
.reason(ErrorReason::Internal_Server_Error)?;
sqlx::query(
MYSQL_QUERIES
.get("upsert-object")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(object_json)
.bind(attributes_json)
.bind(state.to_string())
.bind(owner)
.bind(owner)
.bind(owner)
.execute(&mut **executor)
.await?;
sqlx::query(get_mysql_query!("upsert-object"))
.bind(uid)
.bind(object_json)
.bind(attributes_json)
.bind(state.to_string())
.bind(owner)
.bind(owner)
.bind(owner)
.execute(&mut **executor)
.await?;
// Insert the new tags if present
if let Some(tags) = tags {
// delete the existing tags
sqlx::query(
MYSQL_QUERIES
.get("delete-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.execute(&mut **executor)
.await?;
// insert the new ones
for tag in tags {
sqlx::query(
MYSQL_QUERIES
.get("insert-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
sqlx::query(get_mysql_query!("delete-tags"))
.bind(uid)
.bind(tag)
.execute(&mut **executor)
.await?;
// insert the new ones
for tag in tags {
sqlx::query(get_mysql_query!("insert-tags"))
.bind(uid)
.bind(tag)
.execute(&mut **executor)
.await?;
}
}
@ -627,14 +649,10 @@ where
{
debug!("Uid = {}", uid);
let list = sqlx::query(
MYSQL_QUERIES
.get("select-rows-read_access-with-object-id")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.fetch_all(executor)
.await?;
let list = sqlx::query(get_mysql_query!("select-rows-read_access-with-object-id"))
.bind(uid)
.fetch_all(executor)
.await?;
let mut ids: HashMap<String, HashSet<ObjectOperationType>> = HashMap::with_capacity(list.len());
for row in list {
ids.insert(
@ -656,14 +674,10 @@ where
E: Executor<'e, Database = MySql> + Copy,
{
debug!("Owner = {}", user);
let list = sqlx::query(
MYSQL_QUERIES
.get("select-objects-access-obtained")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(user)
.fetch_all(executor)
.await?;
let list = sqlx::query(get_mysql_query!("select-objects-access-obtained"))
.bind(user)
.fetch_all(executor)
.await?;
let mut ids: HashMap<String, (String, StateEnumeration, HashSet<ObjectOperationType>)> =
HashMap::with_capacity(list.len());
for row in list {
@ -704,15 +718,11 @@ async fn perms<'e, E>(uid: &str, userid: &str, executor: E) -> KResult<HashSet<O
where
E: Executor<'e, Database = MySql> + Copy,
{
let row: Option<MySqlRow> = sqlx::query(
MYSQL_QUERIES
.get("select-user-accesses-for-object")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(userid)
.fetch_optional(executor)
.await?;
let row: Option<MySqlRow> = sqlx::query(get_mysql_query!("select-user-accesses-for-object"))
.bind(uid)
.bind(userid)
.fetch_optional(executor)
.await?;
row.map_or(Ok(HashSet::new()), |row| {
let perms_raw = row.get::<Vec<u8>, _>(0);
@ -745,16 +755,12 @@ where
.reason(ErrorReason::Internal_Server_Error)?;
// Upsert the DB
sqlx::query(
MYSQL_QUERIES
.get("upsert-row-read_access")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(userid)
.bind(json)
.execute(executor)
.await?;
sqlx::query(get_mysql_query!("upsert-row-read_access"))
.bind(uid)
.bind(userid)
.bind(json)
.execute(executor)
.await?;
trace!("Insert read access right in DB: {uid} / {userid}");
Ok(())
}
@ -777,15 +783,11 @@ where
// No remaining permissions, delete the row
if perms.is_empty() {
sqlx::query(
MYSQL_QUERIES
.get("delete-rows-read_access")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(userid)
.execute(executor)
.await?;
sqlx::query(get_mysql_query!("delete-rows-read_access"))
.bind(uid)
.bind(userid)
.execute(executor)
.await?;
return Ok(())
}
@ -795,16 +797,12 @@ where
.reason(ErrorReason::Internal_Server_Error)?;
// Update the DB
sqlx::query(
MYSQL_QUERIES
.get("update-rows-read_access-with-permission")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(json)
.bind(uid)
.bind(userid)
.execute(executor)
.await?;
sqlx::query(get_mysql_query!("update-rows-read_access-with-permission"))
.bind(json)
.bind(uid)
.bind(userid)
.execute(executor)
.await?;
Ok(())
}
@ -812,15 +810,11 @@ pub(crate) async fn is_object_owned_by_<'e, E>(uid: &str, owner: &str, executor:
where
E: Executor<'e, Database = MySql> + Copy,
{
let row: Option<MySqlRow> = sqlx::query(
MYSQL_QUERIES
.get("has-row-objects")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(owner)
.fetch_optional(executor)
.await?;
let row: Option<MySqlRow> = sqlx::query(get_mysql_query!("has-row-objects"))
.bind(uid)
.bind(owner)
.fetch_optional(executor)
.await?;
Ok(row.is_some())
}
@ -876,30 +870,22 @@ async fn clear_database_<'e, E>(executor: E) -> KResult<()>
where
E: Executor<'e, Database = MySql> + Copy,
{
// Erase `context` table
sqlx::query(get_mysql_query!("clean-table-context"))
.execute(executor)
.await?;
// Erase `objects` table
sqlx::query(
MYSQL_QUERIES
.get("clean-table-objects")
.expect("SQL query can't be found"),
)
.execute(executor)
.await?;
sqlx::query(get_mysql_query!("clean-table-objects"))
.execute(executor)
.await?;
// Erase `read_access` table
sqlx::query(
MYSQL_QUERIES
.get("clean-table-read_access")
.expect("SQL query can't be found"),
)
.execute(executor)
.await?;
sqlx::query(get_mysql_query!("clean-table-read_access"))
.execute(executor)
.await?;
// Erase `tags` table
sqlx::query(
MYSQL_QUERIES
.get("clean-table-tags")
.expect("SQL query can't be found"),
)
.execute(executor)
.await?;
sqlx::query(get_mysql_query!("clean-table-tags"))
.execute(executor)
.await?;
Ok(())
}
@ -943,3 +929,140 @@ pub(crate) async fn atomic_(
}
Ok(())
}
pub(crate) async fn is_migration_in_progress_<'e, E>(executor: E) -> KResult<bool>
where
E: Executor<'e, Database = MySql> + Copy,
{
match sqlx::query(get_mysql_query!("select-context"))
.fetch_optional(executor)
.await?
{
Some(context_row) => {
let state = context_row.get::<String, _>(1);
Ok(state == "upgrading")
}
None => Ok(false),
}
}
pub(crate) async fn migrate_(
executor: &Pool<MySql>,
last_version_run: &str,
query_name: &str,
) -> KResult<()> {
trace!("Set status to upgrading and last version run: {last_version_run}");
let upsert_context = get_mysql_query!(query_name);
trace!("{query_name}: {upsert_context}");
match query_name {
"insert-context" => {
sqlx::query(upsert_context)
.bind(last_version_run)
.bind("upgrading")
.execute(executor)
.await
}
"update-context" => {
sqlx::query(upsert_context)
.bind(last_version_run)
.bind("upgrading")
.bind("upgrading")
.execute(executor)
.await
}
_ => kms_bail!("Unknown query name: {query_name}"),
}?;
trace!("Migrate data from version {last_version_run}");
// Process migration for each KMS version
let current_kms_version = crate_version!();
if last_version_run == KMS_VERSION_BEFORE_MIGRATION_SUPPORT {
migrate_from_4_12_0_to_4_13_0(executor).await?;
} else {
trace!("No migration needed between {last_version_run} and {current_kms_version}");
}
// Set the current running version
trace!("Set status to ready and last version run: {current_kms_version}");
sqlx::query(get_mysql_query!("update-context"))
.bind(current_kms_version)
.bind("ready")
.bind("upgrading")
.execute(executor)
.await?;
Ok(())
}
/// Before version 4.13.0, the KMIP attributes were stored inside the objects themselves in the `objects` table.
/// The new `attributes` column stores the KMIP attributes separately, even for KMIP objects that do not carry KMIP attributes (such as Certificates).
pub(crate) async fn migrate_from_4_12_0_to_4_13_0(executor: &Pool<MySql>) -> KResult<()> {
trace!("Migrating from 4.12.0 to 4.13.0");
// Add the column attributes to the objects table
if (sqlx::query(get_mysql_query!("has-column-attributes"))
.execute(executor)
.await)
.is_ok()
{
trace!("Column attributes already exists, nothing to do");
return Ok(());
}
trace!("Column attributes does not exist, adding it");
sqlx::query(get_mysql_query!("add-column-attributes"))
.execute(executor)
.await?;
// Select all objects and extract the KMIP attributes to be stored in the new column
let rows = sqlx::query("SELECT * FROM objects")
.fetch_all(executor)
.await?;
let mut operations = Vec::with_capacity(rows.len());
for row in rows {
let uid = row.get::<String, _>(0);
let db_object: DBObject = serde_json::from_slice(&row.get::<Vec<u8>, _>(1))
.context("migrate: failed deserializing the object")
.reason(ErrorReason::Internal_Server_Error)?;
let object = Object::post_fix(db_object.object_type, db_object.object);
trace!(
"migrate_from_4_12_0_to_4_13_0: object (type: {})={:?}",
object.object_type(),
uid
);
let attributes = match object.clone().attributes() {
Ok(attrs) => attrs.clone(),
Err(_error) => {
// For example, a Certificate object has no KMIP attributes
Attributes::default()
}
};
let tags = retrieve_tags_(&uid, executor).await?;
operations.push(AtomicOperation::UpdateObject((
uid,
object,
attributes,
Some(tags),
)));
}
let mut tx = executor.begin().await?;
match atomic_(
"this user is not used to update objects",
&operations,
&mut tx,
)
.await
{
Ok(()) => {
tx.commit().await?;
Ok(())
}
Err(e) => {
tx.rollback().await.context("transaction failed")?;
Err(e)
}
}
}
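Taken together, is_migration_in_progress_, migrate_ and migrate_from_4_12_0_to_4_13_0 implement a small state machine on the `context` table: no row, then (KMS_VERSION_BEFORE_MIGRATION_SUPPORT, "upgrading"), then the per-version data migration, then (current version, "ready"). The following is a minimal sketch of what a successful run should leave behind; it is a hypothetical check, not part of this commit, and assumes the tables were already created by the pool constructor:

// Hypothetical check (not part of this commit): after a full run on a fresh
// pre-4.13.0 MySQL database, the single `context` row should record the
// current crate version with state "ready".
async fn context_reaches_ready(pool: &Pool<MySql>) -> KResult<()> {
    // Simulate a database that predates migration support.
    migrate_(pool, KMS_VERSION_BEFORE_MIGRATION_SUPPORT, "insert-context").await?;
    // select-context returns the (version, state) pair of the single row.
    let row = sqlx::query(get_mysql_query!("select-context"))
        .fetch_one(pool)
        .await?;
    assert_eq!(row.get::<String, _>(0), crate_version!());
    assert_eq!(row.get::<String, _>(1), "ready");
    Ok(())
}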


@ -62,7 +62,8 @@ impl TryFrom<&SqliteRow> for ObjectWithMetadata {
.context("failed deserializing the object")
.reason(ErrorReason::Internal_Server_Error)?;
let object = Object::post_fix(db_object.object_type, db_object.object);
let attributes = serde_json::from_str(&row.get::<String, _>(2))?;
let raw_attributes = row.get::<Value, _>(2);
let attributes = serde_json::from_value(raw_attributes)?;
let owner = row.get::<String, _>(3);
let state = state_from_string(&row.get::<String, _>(4))?;
let raw_permissions = row.get::<Vec<u8>, _>(5);


@ -5,6 +5,7 @@ use std::{
};
use async_trait::async_trait;
use clap::crate_version;
use cosmian_kmip::kmip::{
kmip_objects::Object,
kmip_operations::ErrorReason,
@ -22,15 +23,29 @@ use uuid::Uuid;
use crate::{
core::extra_database_params::ExtraDatabaseParams,
database::{
database_trait::AtomicOperation, object_with_metadata::ObjectWithMetadata,
query_from_attributes, state_from_string, DBObject, Database, PgSqlPlaceholder,
PGSQL_QUERIES,
database_trait::AtomicOperation, migrate::do_migration,
object_with_metadata::ObjectWithMetadata, query_from_attributes, state_from_string,
DBObject, Database, PgSqlPlaceholder, KMS_VERSION_BEFORE_MIGRATION_SUPPORT, PGSQL_QUERIES,
},
error::KmsError,
kms_bail, kms_error,
result::{KResult, KResultHelper},
};
#[macro_export]
macro_rules! get_pgsql_query {
($name:literal) => {
PGSQL_QUERIES
.get($name)
.ok_or_else(|| kms_error!("{} SQL query can't be found", $name))?
};
($name:expr) => {
PGSQL_QUERIES
.get($name)
.ok_or_else(|| kms_error!("{} SQL query can't be found", $name))?
};
}
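Since the macro expands to an expression that uses `?`, it can only be invoked inside a function returning KResult (or a compatible Result). A minimal, hypothetical usage sketch, mirroring the call sites below:

// Hypothetical helper (illustration only): a missing named query is propagated
// as a KmsError via `?`, so the surrounding function must return KResult.
async fn delete_all_tags_for(uid: &str, pool: &Pool<Postgres>) -> KResult<()> {
    sqlx::query(get_pgsql_query!("delete-tags"))
        .bind(uid)
        .execute(pool)
        .await?;
    Ok(())
}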
pub(crate) struct PgPool {
pool: Pool<Postgres>,
}
@ -48,35 +63,29 @@ impl PgPool {
.connect_with(options)
.await?;
sqlx::query(
PGSQL_QUERIES
.get("create-table-objects")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.execute(&pool)
.await?;
sqlx::query(get_pgsql_query!("create-table-objects"))
.execute(&pool)
.await?;
sqlx::query(
PGSQL_QUERIES
.get("create-table-read_access")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.execute(&pool)
.await?;
sqlx::query(get_pgsql_query!("create-table-context"))
.execute(&pool)
.await?;
sqlx::query(
PGSQL_QUERIES
.get("create-table-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.execute(&pool)
.await?;
sqlx::query(get_pgsql_query!("create-table-read_access"))
.execute(&pool)
.await?;
sqlx::query(get_pgsql_query!("create-table-tags"))
.execute(&pool)
.await?;
if clear_database {
clear_database_(&pool).await?;
}
Ok(Self { pool })
let pgsql_pool = Self { pool };
pgsql_pool.migrate(None).await?;
Ok(pgsql_pool)
}
}
@ -86,17 +95,59 @@ impl Database for PgPool {
None
}
async fn migrate(&self, _params: Option<&ExtraDatabaseParams>) -> KResult<()> {
trace!("Migrate database");
// Get the context rows
match sqlx::query(get_pgsql_query!("select-context"))
.fetch_optional(&self.pool)
.await?
{
None => {
trace!("No context row found, migrating from scratch");
return migrate_(
&self.pool,
KMS_VERSION_BEFORE_MIGRATION_SUPPORT,
"insert-context",
)
.await;
}
Some(context_row) => {
let last_kms_version_run = context_row.get::<String, _>(0);
let state = context_row.get::<String, _>(1);
trace!(
"Context row found, migrating from version {last_kms_version_run} (state: \
{state})"
);
let current_kms_version = crate_version!();
debug!(
"[state={state}] Last KMS version run: {last_kms_version_run}, Current KMS \
version: {current_kms_version}"
);
if do_migration(&last_kms_version_run, current_kms_version, &state)? {
return migrate_(&self.pool, current_kms_version, "update-context").await;
}
}
}
Ok(())
}
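The version/state decision itself is delegated to do_migration in database/migrate.rs, which is not part of this excerpt. The following is a hedged sketch of the expected behaviour only, under the assumption that an interrupted upgrade is re-run and that otherwise the stored version is compared with the running one (the real implementation may differ, e.g. by using a semantic-version comparison):

// Hedged sketch only — the real do_migration lives in database/migrate.rs.
fn do_migration(last_version_run: &str, current_version: &str, state: &str) -> KResult<bool> {
    match state {
        // A previous upgrade was interrupted: run the migration again.
        "upgrading" => Ok(true),
        // Normal start-up: migrate only if the recorded version differs from the binary.
        "ready" => Ok(last_version_run != current_version),
        other => kms_bail!("Unknown migration state: {other}"),
    }
}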
async fn create(
&self,
uid: Option<String>,
user: &str,
owner: &str,
object: &Object,
attributes: &Attributes,
tags: &HashSet<String>,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<String> {
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = self.pool.begin().await?;
let uid = match create_(uid, user, object, attributes, tags, &mut tx).await {
let uid = match create_(uid, owner, object, attributes, tags, &mut tx).await {
Ok(uid) => uid,
Err(e) => {
tx.rollback().await.context("transaction failed")?;
@ -111,10 +162,10 @@ impl Database for PgPool {
&self,
uid_or_tags: &str,
user: &str,
operation_type: ObjectOperationType,
query_access_grant: ObjectOperationType,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<HashMap<String, ObjectWithMetadata>> {
retrieve_(uid_or_tags, user, operation_type, &self.pool).await
retrieve_(uid_or_tags, user, query_access_grant, &self.pool).await
}
async fn retrieve_tags(
@ -152,6 +203,9 @@ impl Database for PgPool {
state: StateEnumeration,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = self.pool.begin().await?;
match update_state_(uid, state, &mut tx).await {
Ok(()) => {
@ -175,6 +229,10 @@ impl Database for PgPool {
state: StateEnumeration,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = self.pool.begin().await?;
match upsert_(uid, user, object, attributes, tags, state, &mut tx).await {
Ok(()) => {
@ -194,6 +252,10 @@ impl Database for PgPool {
user: &str,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = self.pool.begin().await?;
match delete_(uid, user, &mut tx).await {
Ok(()) => {
@ -226,30 +288,38 @@ impl Database for PgPool {
async fn grant_access(
&self,
uid: &str,
userid: &str,
user: &str,
operation_types: HashSet<ObjectOperationType>,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
insert_access_(uid, userid, operation_types, &self.pool).await
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
insert_access_(uid, user, operation_types, &self.pool).await
}
async fn remove_access(
&self,
uid: &str,
userid: &str,
user: &str,
operation_types: HashSet<ObjectOperationType>,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
remove_access_(uid, userid, operation_types, &self.pool).await
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
remove_access_(uid, user, operation_types, &self.pool).await
}
async fn is_object_owned_by(
&self,
uid: &str,
userid: &str,
owner: &str,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<bool> {
is_object_owned_by_(uid, userid, &self.pool).await
is_object_owned_by_(uid, owner, &self.pool).await
}
async fn find(
@ -273,21 +343,25 @@ impl Database for PgPool {
async fn list_user_access_rights_on_object(
&self,
uid: &str,
userid: &str,
user: &str,
no_inherited_access: bool,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<HashSet<ObjectOperationType>> {
list_user_access_rights_on_object_(uid, userid, no_inherited_access, &self.pool).await
list_user_access_rights_on_object_(uid, user, no_inherited_access, &self.pool).await
}
async fn atomic(
&self,
owner: &str,
user: &str,
operations: &[AtomicOperation],
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = self.pool.begin().await?;
match atomic_(owner, operations, &mut tx).await {
match atomic_(user, operations, &mut tx).await {
Ok(()) => {
tx.commit().await?;
Ok(())
@ -322,30 +396,22 @@ pub(crate) async fn create_(
// If the uid is not provided, generate a new one
let uid = uid.unwrap_or_else(|| Uuid::new_v4().to_string());
sqlx::query(
PGSQL_QUERIES
.get("insert-objects")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid.clone())
.bind(object_json)
.bind(attributes_json)
.bind(StateEnumeration::Active.to_string())
.bind(owner)
.execute(&mut **executor)
.await?;
sqlx::query(get_pgsql_query!("insert-objects"))
.bind(uid.clone())
.bind(object_json)
.bind(attributes_json)
.bind(StateEnumeration::Active.to_string())
.bind(owner)
.execute(&mut **executor)
.await?;
// Insert the tags
for tag in tags {
sqlx::query(
PGSQL_QUERIES
.get("insert-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid.clone())
.bind(tag)
.execute(&mut **executor)
.await?;
sqlx::query(get_pgsql_query!("insert-tags"))
.bind(uid.clone())
.bind(tag)
.execute(&mut **executor)
.await?;
}
trace!("Created in DB: {uid} / {owner}");
@ -377,9 +443,7 @@ where
.join(", ");
// Build the raw SQL query
let raw_sql = PGSQL_QUERIES
.get("select-from-tags")
.context("SQL query can't be found")?
let raw_sql = get_pgsql_query!("select-from-tags")
.replace("@TAGS", &tags_params)
.replace("@LEN", &format!("${}", tags.len() + 1))
.replace("@USER", &format!("${}", tags.len() + 2));
@ -390,21 +454,17 @@ where
query = query.bind(tag);
}
// Bind the tags len and the user
query = query.bind(tags.len() as i16).bind(user);
query = query.bind(i16::try_from(tags.len())?).bind(user);
// Execute the query
query.fetch_all(executor).await?
} else {
sqlx::query(
PGSQL_QUERIES
.get("select-object")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid_or_tags)
.bind(user)
.fetch_optional(executor)
.await?
.map_or(vec![], |row| vec![row])
sqlx::query(get_pgsql_query!("select-object"))
.bind(uid_or_tags)
.bind(user)
.fetch_optional(executor)
.await?
.map_or(vec![], |row| vec![row])
};
// process the rows and find the tags
@ -442,14 +502,10 @@ async fn retrieve_tags_<'e, E>(uid: &str, executor: E) -> KResult<HashSet<String
where
E: Executor<'e, Database = Postgres> + Copy,
{
let rows: Vec<PgRow> = sqlx::query(
PGSQL_QUERIES
.get("select-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.fetch_all(executor)
.await?;
let rows: Vec<PgRow> = sqlx::query(get_pgsql_query!("select-tags"))
.bind(uid)
.fetch_all(executor)
.await?;
let tags = rows.iter().map(|r| r.get(0)).collect::<HashSet<String>>();
@ -474,39 +530,27 @@ pub(crate) async fn update_object_(
.context("failed serializing the attributes to JSON")
.reason(ErrorReason::Internal_Server_Error)?;
sqlx::query(
PGSQL_QUERIES
.get("update-object-with-object")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(object_json)
.bind(attributes_json)
.bind(uid)
.execute(&mut **executor)
.await?;
// Update the tags
if let Some(tags) = tags {
// delete the existing tags
sqlx::query(
PGSQL_QUERIES
.get("delete-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
sqlx::query(get_pgsql_query!("update-object-with-object"))
.bind(object_json)
.bind(attributes_json)
.bind(uid)
.execute(&mut **executor)
.await?;
for tag in tags {
sqlx::query(
PGSQL_QUERIES
.get("insert-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
// Insert the new tags if any
if let Some(tags) = tags {
// delete the existing tags
sqlx::query(get_pgsql_query!("delete-tags"))
.bind(uid)
.bind(tag)
.execute(&mut **executor)
.await?;
for tag in tags {
sqlx::query(get_pgsql_query!("insert-tags"))
.bind(uid)
.bind(tag)
.execute(&mut **executor)
.await?;
}
}
@ -519,15 +563,11 @@ pub(crate) async fn update_state_(
state: StateEnumeration,
executor: &mut Transaction<'_, Postgres>,
) -> KResult<()> {
sqlx::query(
PGSQL_QUERIES
.get("update-object-with-state")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(state.to_string())
.bind(uid)
.execute(&mut **executor)
.await?;
sqlx::query(get_pgsql_query!("update-object-with-state"))
.bind(state.to_string())
.bind(uid)
.execute(&mut **executor)
.await?;
trace!("Updated in DB: {uid}");
Ok(())
}
@ -538,25 +578,17 @@ pub(crate) async fn delete_(
executor: &mut Transaction<'_, Postgres>,
) -> KResult<()> {
// delete the object
sqlx::query(
PGSQL_QUERIES
.get("delete-object")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(owner)
.execute(&mut **executor)
.await?;
sqlx::query(get_pgsql_query!("delete-object"))
.bind(uid)
.bind(owner)
.execute(&mut **executor)
.await?;
// delete the tags
sqlx::query(
PGSQL_QUERIES
.get("delete-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.execute(&mut **executor)
.await?;
sqlx::query(get_pgsql_query!("delete-tags"))
.bind(uid)
.execute(&mut **executor)
.await?;
trace!("Deleted in DB: {uid}");
Ok(())
@ -582,41 +614,29 @@ pub(crate) async fn upsert_(
.context("failed serializing the attributes to JSON")
.reason(ErrorReason::Internal_Server_Error)?;
sqlx::query(
PGSQL_QUERIES
.get("upsert-object")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(object_json)
.bind(attributes_json)
.bind(state.to_string())
.bind(owner)
.execute(&mut **executor)
.await?;
sqlx::query(get_pgsql_query!("upsert-object"))
.bind(uid)
.bind(object_json)
.bind(attributes_json)
.bind(state.to_string())
.bind(owner)
.execute(&mut **executor)
.await?;
// Insert the new tags if present
if let Some(tags) = tags {
// delete the existing tags
sqlx::query(
PGSQL_QUERIES
.get("delete-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.execute(&mut **executor)
.await?;
// insert the new ones
for tag in tags {
sqlx::query(
PGSQL_QUERIES
.get("insert-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
sqlx::query(get_pgsql_query!("delete-tags"))
.bind(uid)
.bind(tag)
.execute(&mut **executor)
.await?;
// insert the new ones
for tag in tags {
sqlx::query(get_pgsql_query!("insert-tags"))
.bind(uid)
.bind(tag)
.execute(&mut **executor)
.await?;
}
}
@ -633,14 +653,10 @@ where
{
debug!("Uid = {}", uid);
let list = sqlx::query(
PGSQL_QUERIES
.get("select-rows-read_access-with-object-id")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.fetch_all(executor)
.await?;
let list = sqlx::query(get_pgsql_query!("select-rows-read_access-with-object-id"))
.bind(uid)
.fetch_all(executor)
.await?;
let mut ids: HashMap<String, HashSet<ObjectOperationType>> = HashMap::with_capacity(list.len());
for row in list {
ids.insert(
@ -662,14 +678,10 @@ where
E: Executor<'e, Database = Postgres> + Copy,
{
debug!("Owner = {}", user);
let list = sqlx::query(
PGSQL_QUERIES
.get("select-objects-access-obtained")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(user)
.fetch_all(executor)
.await?;
let list = sqlx::query(get_pgsql_query!("select-objects-access-obtained"))
.bind(user)
.fetch_all(executor)
.await?;
let mut ids: HashMap<String, (String, StateEnumeration, HashSet<ObjectOperationType>)> =
HashMap::with_capacity(list.len());
for row in list {
@ -710,15 +722,11 @@ async fn perms<'e, E>(uid: &str, userid: &str, executor: E) -> KResult<HashSet<O
where
E: Executor<'e, Database = Postgres> + Copy,
{
let row: Option<PgRow> = sqlx::query(
PGSQL_QUERIES
.get("select-user-accesses-for-object")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(userid)
.fetch_optional(executor)
.await?;
let row: Option<PgRow> = sqlx::query(get_pgsql_query!("select-user-accesses-for-object"))
.bind(uid)
.bind(userid)
.fetch_optional(executor)
.await?;
row.map_or(Ok(HashSet::new()), |row| {
let perms_value = row
@ -753,16 +761,12 @@ where
.reason(ErrorReason::Internal_Server_Error)?;
// Upsert the DB
sqlx::query(
PGSQL_QUERIES
.get("upsert-row-read_access")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(userid)
.bind(json)
.execute(executor)
.await?;
sqlx::query(get_pgsql_query!("upsert-row-read_access"))
.bind(uid)
.bind(userid)
.bind(json)
.execute(executor)
.await?;
trace!("Insert read access right in DB: {uid} / {userid}");
Ok(())
}
@ -785,15 +789,11 @@ where
// No remaining permissions, delete the row
if perms.is_empty() {
sqlx::query(
PGSQL_QUERIES
.get("delete-rows-read_access")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(userid)
.execute(executor)
.await?;
sqlx::query(get_pgsql_query!("delete-rows-read_access"))
.bind(uid)
.bind(userid)
.execute(executor)
.await?;
return Ok(())
}
@ -803,16 +803,12 @@ where
.reason(ErrorReason::Internal_Server_Error)?;
// Update the DB
sqlx::query(
PGSQL_QUERIES
.get("update-rows-read_access-with-permission")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(userid)
.bind(json)
.execute(executor)
.await?;
sqlx::query(get_pgsql_query!("update-rows-read_access-with-permission"))
.bind(uid)
.bind(userid)
.bind(json)
.execute(executor)
.await?;
trace!("Deleted in DB: {uid} / {userid}");
Ok(())
}
@ -821,15 +817,11 @@ pub(crate) async fn is_object_owned_by_<'e, E>(uid: &str, owner: &str, executor:
where
E: Executor<'e, Database = Postgres> + Copy,
{
let row: Option<PgRow> = sqlx::query(
PGSQL_QUERIES
.get("has-row-objects")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(owner)
.fetch_optional(executor)
.await?;
let row: Option<PgRow> = sqlx::query(get_pgsql_query!("has-row-objects"))
.bind(uid)
.bind(owner)
.fetch_optional(executor)
.await?;
Ok(row.is_some())
}
@ -863,7 +855,7 @@ fn to_qualified_uids(
let mut uids = Vec::with_capacity(rows.len());
for row in rows {
let attrs: Attributes = match row.try_get::<Value, _>(2) {
Err(_) => return Err(KmsError::DatabaseError("no attributes found".to_string())),
Err(_) => return Err(KmsError::DatabaseError("no attributes found".to_owned())),
Ok(v) => serde_json::from_value(v)
.context("failed deserializing the attributes")
.map_err(|e| KmsError::DatabaseError(e.to_string()))?,
@ -883,30 +875,22 @@ async fn clear_database_<'e, E>(executor: E) -> KResult<()>
where
E: Executor<'e, Database = Postgres> + Copy,
{
// Erase `context` table
sqlx::query(get_pgsql_query!("clean-table-context"))
.execute(executor)
.await?;
// Erase `objects` table
sqlx::query(
PGSQL_QUERIES
.get("clean-table-objects")
.expect("SQL query can't be found"),
)
.execute(executor)
.await?;
sqlx::query(get_pgsql_query!("clean-table-objects"))
.execute(executor)
.await?;
// Erase `read_access` table
sqlx::query(
PGSQL_QUERIES
.get("clean-table-read_access")
.expect("SQL query can't be found"),
)
.execute(executor)
.await?;
sqlx::query(get_pgsql_query!("clean-table-read_access"))
.execute(executor)
.await?;
// Erase `tags` table
sqlx::query(
PGSQL_QUERIES
.get("clean-table-tags")
.expect("SQL query can't be found"),
)
.execute(executor)
.await?;
sqlx::query(get_pgsql_query!("clean-table-tags"))
.execute(executor)
.await?;
Ok(())
}
@ -950,3 +934,140 @@ pub(crate) async fn atomic_(
}
Ok(())
}
pub(crate) async fn is_migration_in_progress_<'e, E>(executor: E) -> KResult<bool>
where
E: Executor<'e, Database = Postgres> + Copy,
{
match sqlx::query(get_pgsql_query!("select-context"))
.fetch_optional(executor)
.await?
{
Some(context_row) => {
let state = context_row.get::<String, _>(1);
Ok(state == "upgrading")
}
None => Ok(false),
}
}
pub(crate) async fn migrate_(
executor: &Pool<Postgres>,
last_version_run: &str,
query_name: &str,
) -> KResult<()> {
trace!("Set status to upgrading and last version run: {last_version_run}");
let upsert_context = get_pgsql_query!(query_name);
trace!("{query_name}: {upsert_context}");
match query_name {
"insert-context" => {
sqlx::query(upsert_context)
.bind(last_version_run)
.bind("upgrading")
.execute(executor)
.await
}
"update-context" => {
sqlx::query(upsert_context)
.bind(last_version_run)
.bind("upgrading")
.bind("upgrading")
.execute(executor)
.await
}
_ => kms_bail!("Unknown query name: {query_name}"),
}?;
trace!("Migrate data from version {last_version_run}");
// Process migration for each KMS version
let current_kms_version = crate_version!();
if last_version_run == KMS_VERSION_BEFORE_MIGRATION_SUPPORT {
migrate_from_4_12_0_to_4_13_0(executor).await?;
} else {
trace!("No migration needed between {last_version_run} and {current_kms_version}");
}
// Set the current running version
trace!("Set status to ready and last version run: {current_kms_version}");
sqlx::query(get_pgsql_query!("update-context"))
.bind(current_kms_version)
.bind("ready")
.bind("upgrading")
.execute(executor)
.await?;
Ok(())
}
/// Before version 4.13.0, the KMIP attributes were stored inside the objects themselves in the `objects` table.
/// The new `attributes` column stores the KMIP attributes separately, even for KMIP objects that do not carry KMIP attributes (such as Certificates).
pub(crate) async fn migrate_from_4_12_0_to_4_13_0(executor: &Pool<Postgres>) -> KResult<()> {
trace!("Migrating from 4.12.0 to 4.13.0");
// Add the column attributes to the objects table
if (sqlx::query(get_pgsql_query!("has-column-attributes"))
.execute(executor)
.await)
.is_ok()
{
trace!("Column attributes already exists, nothing to do");
return Ok(());
}
trace!("Column attributes does not exist, adding it");
sqlx::query(get_pgsql_query!("add-column-attributes"))
.execute(executor)
.await?;
// Select all objects and extract the KMIP attributes to be stored in the new column
let rows = sqlx::query("SELECT * FROM objects")
.fetch_all(executor)
.await?;
let mut operations = Vec::with_capacity(rows.len());
for row in rows {
let uid = row.get::<String, _>(0);
let db_object: DBObject = serde_json::from_slice(&row.get::<Vec<u8>, _>(1))
.context("migrate: failed deserializing the object")
.reason(ErrorReason::Internal_Server_Error)?;
let object = Object::post_fix(db_object.object_type, db_object.object);
trace!(
"migrate_from_4_12_0_to_4_13_0: object (type: {})={:?}",
object.object_type(),
uid
);
let attributes = match object.clone().attributes() {
Ok(attrs) => attrs.clone(),
Err(_error) => {
// For example, a Certificate object has no KMIP attributes
Attributes::default()
}
};
let tags = retrieve_tags_(&uid, executor).await?;
operations.push(AtomicOperation::UpdateObject((
uid,
object,
attributes,
Some(tags),
)));
}
let mut tx = executor.begin().await?;
match atomic_(
"this user is not used to update objects",
&operations,
&mut tx,
)
.await
{
Ok(()) => {
tx.commit().await?;
Ok(())
}
Err(e) => {
tx.rollback().await.context("transaction failed")?;
Err(e)
}
}
}


@ -1,3 +1,9 @@
-- name: create-table-context
CREATE TABLE IF NOT EXISTS context (
version VARCHAR(40) PRIMARY KEY,
state VARCHAR(40)
);
-- name: create-table-objects
CREATE TABLE IF NOT EXISTS objects (
id VARCHAR(40) PRIMARY KEY,
@ -6,6 +12,10 @@ CREATE TABLE IF NOT EXISTS objects (
state VARCHAR(32),
owner VARCHAR(255)
);
-- name: add-column-attributes
ALTER TABLE objects ADD COLUMN attributes json;
-- name: has-column-attributes
SELECT attributes from objects;
-- name: create-table-read_access
CREATE TABLE IF NOT EXISTS read_access (
@ -22,6 +32,9 @@ CREATE TABLE IF NOT EXISTS tags (
UNIQUE (id, tag)
);
-- name: clean-table-context
DELETE FROM context;
-- name: clean-table-objects
DELETE FROM objects;
@ -31,6 +44,18 @@ DELETE FROM read_access;
-- name: clean-table-tags
DELETE FROM tags;
-- name: select-context
SELECT * FROM context ORDER BY version ASC LIMIT 1;
-- name: insert-context
INSERT INTO context (version, state) VALUES ($1, $2);
-- name: update-context
UPDATE context SET version=$1, state=$2 WHERE state=$3;
-- name: delete-version
DELETE FROM context WHERE version=$1;
-- name: insert-objects
INSERT INTO objects (id, object, attributes, state, owner) VALUES ($1, $2, $3, $4, $5);


@ -1,3 +1,9 @@
-- name: create-table-context
CREATE TABLE IF NOT EXISTS context (
version VARCHAR(40) PRIMARY KEY,
state VARCHAR(40)
);
-- name: create-table-objects
CREATE TABLE IF NOT EXISTS objects (
id VARCHAR(40) PRIMARY KEY,
@ -6,6 +12,10 @@ CREATE TABLE IF NOT EXISTS objects (
state VARCHAR(32),
owner VARCHAR(255)
);
-- name: add-column-attributes
ALTER TABLE objects ADD COLUMN attributes json;
-- name: has-column-attributes
SHOW COLUMNS FROM objects LIKE 'attributes';
-- name: create-table-read_access
CREATE TABLE IF NOT EXISTS read_access (
@ -22,6 +32,9 @@ CREATE TABLE IF NOT EXISTS tags (
UNIQUE (id, tag)
);
-- name: clean-table-context
DELETE FROM context;
-- name: clean-table-objects
DELETE FROM objects;
@ -31,6 +44,18 @@ DELETE FROM read_access;
-- name: clean-table-tags
DELETE FROM tags;
-- name: select-context
SELECT * FROM context LIMIT 1;
-- name: insert-context
INSERT INTO context (version, state) VALUES (?, ?);
-- name: update-context
UPDATE context SET version=?, state=? WHERE state=?;
-- name: delete-version
DELETE FROM context WHERE version=?;
-- name: insert-objects
INSERT INTO objects (id, object, attributes, state, owner) VALUES (?, ?, ?, ?, ?);


@ -119,7 +119,9 @@ impl ObjectsDB {
fn encrypt_object(&self, uid: &str, redis_db_object: &RedisDbObject) -> KResult<Vec<u8>> {
let nonce = {
let mut rng = self.rng.lock().expect("failed acquiring a lock on the RNG");
let mut rng = self.rng.lock().map_err(|e| {
KmsError::DatabaseError(format!("failed acquiring a lock on the RNG. Error: {e:?}"))
})?;
Nonce::new(&mut *rng)
};
let ct = self.dem.encrypt(
@ -136,7 +138,7 @@ impl ObjectsDB {
fn decrypt_object(&self, uid: &str, ciphertext: &[u8]) -> KResult<RedisDbObject> {
if ciphertext.len() <= Aes256Gcm::NONCE_LENGTH {
return Err(KmsError::CryptographicError(
"invalid ciphertext".to_string(),
"invalid ciphertext".to_owned(),
))
}
let nonce_bytes = &ciphertext[..Aes256Gcm::NONCE_LENGTH];


@ -29,8 +29,8 @@ pub(crate) struct Triple {
impl Triple {
pub(crate) fn new(obj_uid: &str, user_id: &str, permission: ObjectOperationType) -> Self {
Self {
obj_uid: obj_uid.to_string(),
user_id: user_id.to_string(),
obj_uid: obj_uid.to_owned(),
user_id: user_id.to_owned(),
permission,
}
}
@ -82,8 +82,8 @@ impl TryFrom<&Location> for Triple {
KmsError::ConversionError(format!("invalid permissions triple: {parts:?}"))
})?;
Ok(Self {
obj_uid: uid.to_string(),
user_id: user_id.to_string(),
obj_uid: uid.to_owned(),
user_id: user_id.to_owned(),
permission: serde_json::from_str(permission)?,
})
}


@ -128,7 +128,7 @@ impl RedisWithFindex {
};
// the database object to index and store
let db_object =
RedisDbObject::new(object.clone(), owner.to_string(), state, Some(tags.clone()));
RedisDbObject::new(object.clone(), owner.to_owned(), state, Some(tags.clone()));
// extract the keywords
index_additions.insert(
IndexedValue::Location(Location::from(uid.as_bytes())),
@ -179,7 +179,7 @@ impl RedisWithFindex {
.objects_db
.object_get(uid)
.await?
.ok_or_else(|| KmsError::ItemNotFound(uid.to_string()))?;
.ok_or_else(|| KmsError::ItemNotFound(uid.to_owned()))?;
db_object.object = object.clone();
if tags.is_some() {
db_object.tags = tags.cloned();
@ -215,7 +215,7 @@ impl RedisWithFindex {
.objects_db
.object_get(uid)
.await?
.ok_or_else(|| KmsError::ItemNotFound(uid.to_string()))?;
.ok_or_else(|| KmsError::ItemNotFound(uid.to_owned()))?;
db_object.state = state;
// The state is not indexed, so no updates there
Ok(db_object)
@ -228,6 +228,10 @@ impl Database for RedisWithFindex {
None
}
async fn migrate(&self, _params: Option<&ExtraDatabaseParams>) -> KResult<()> {
unimplemented!("Redis-with-Findex does not support migrate operation");
}
/// Insert the given Object in the database.
///
/// A new UUID will be created if none is supplied.
@ -283,12 +287,13 @@ impl Database for RedisWithFindex {
locations
.into_iter()
.map(|location| {
String::from_utf8(location.to_vec()).map_err(|_| kms_error!("Invalid uid"))
String::from_utf8(location.to_vec())
.map_err(|e| kms_error!(format!("Invalid uid. Error: {e:?}")))
})
.collect::<KResult<HashSet<String>>>()?
} else {
// it is an UID
HashSet::from([uid_or_tags.to_string()])
HashSet::from([uid_or_tags.to_owned()])
};
// now retrieve the object
@ -381,7 +386,7 @@ impl Database for RedisWithFindex {
async fn upsert(
&self,
uid: &str,
owner: &str,
user: &str,
object: &Object,
_attributes: &Attributes,
tags: Option<&HashSet<String>>,
@ -389,7 +394,7 @@ impl Database for RedisWithFindex {
params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
let db_object = self
.prepare_object_for_upsert(uid, owner, object, tags, state, params)
.prepare_object_for_upsert(uid, user, object, tags, state, params)
.await?;
// upsert the object
@ -501,7 +506,7 @@ impl Database for RedisWithFindex {
.objects_db
.object_get(uid)
.await?
.ok_or_else(|| KmsError::ItemNotFound(uid.to_string()))?;
.ok_or_else(|| KmsError::ItemNotFound(uid.to_owned()))?;
Ok(object.owner == owner)
}
@ -549,7 +554,8 @@ impl Database for RedisWithFindex {
let uids = locations
.into_iter()
.map(|location| {
String::from_utf8(location.to_vec()).map_err(|_| kms_error!("Invalid uid"))
String::from_utf8(location.to_vec())
.map_err(|e| kms_error!(format!("Invalid uid. Error: {e:?}")))
})
.collect::<KResult<HashSet<String>>>()?;
trace!("find: uids before permissions: {:?}", uids);
@ -614,7 +620,7 @@ impl Database for RedisWithFindex {
async fn atomic(
&self,
owner: &str,
user: &str,
operations: &[AtomicOperation],
params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
@ -624,20 +630,13 @@ impl Database for RedisWithFindex {
AtomicOperation::Upsert((uid, object, _attributes, tags, state)) => {
// TODO: this operation contains a non-atomic retrieve_tags. It will be hard to make this whole method atomic
let db_object = self
.prepare_object_for_upsert(
uid,
owner,
object,
tags.as_ref(),
*state,
params,
)
.prepare_object_for_upsert(uid, user, object, tags.as_ref(), *state, params)
.await?;
redis_operations.push(RedisOperation::Upsert(uid.clone(), db_object));
}
AtomicOperation::Create((uid, object, _attributes, tags)) => {
let (uid, db_object) = self
.prepare_object_for_create(Some(uid.clone()), owner, object, tags)
.prepare_object_for_create(Some(uid.clone()), user, object, tags)
.await?;
redis_operations.push(RedisOperation::Create(uid, db_object));
}


@ -5,6 +5,7 @@ use std::{
};
use async_trait::async_trait;
use clap::crate_version;
use cosmian_kmip::kmip::{
kmip_objects::Object,
kmip_operations::ErrorReason,
@ -23,13 +24,28 @@ use super::object_with_metadata::ObjectWithMetadata;
use crate::{
core::extra_database_params::ExtraDatabaseParams,
database::{
database_trait::AtomicOperation, query_from_attributes, state_from_string, DBObject,
Database, SqlitePlaceholder, SQLITE_QUERIES,
database_trait::AtomicOperation, migrate::do_migration, query_from_attributes,
state_from_string, DBObject, Database, SqlitePlaceholder,
KMS_VERSION_BEFORE_MIGRATION_SUPPORT, SQLITE_QUERIES,
},
kms_bail, kms_error,
result::{KResult, KResultHelper},
};
#[macro_export]
macro_rules! get_sqlite_query {
($name:literal) => {
SQLITE_QUERIES
.get($name)
.ok_or_else(|| kms_error!("{} SQL query can't be found", $name))?
};
($name:expr) => {
SQLITE_QUERIES
.get($name)
.ok_or_else(|| kms_error!("{} SQL query can't be found", $name))?
};
}
pub(crate) struct SqlitePool {
pool: Pool<Sqlite>,
}
@ -51,35 +67,29 @@ impl SqlitePool {
.connect_with(options)
.await?;
sqlx::query(
SQLITE_QUERIES
.get("create-table-objects")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.execute(&pool)
.await?;
sqlx::query(get_sqlite_query!("create-table-context"))
.execute(&pool)
.await?;
sqlx::query(
SQLITE_QUERIES
.get("create-table-read_access")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.execute(&pool)
.await?;
sqlx::query(get_sqlite_query!("create-table-objects"))
.execute(&pool)
.await?;
sqlx::query(
SQLITE_QUERIES
.get("create-table-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.execute(&pool)
.await?;
sqlx::query(get_sqlite_query!("create-table-read_access"))
.execute(&pool)
.await?;
sqlx::query(get_sqlite_query!("create-table-tags"))
.execute(&pool)
.await?;
if clear_database {
clear_database_(&pool).await?;
}
Ok(Self { pool })
let sqlite_pool = Self { pool };
sqlite_pool.migrate(None).await?;
Ok(sqlite_pool)
}
}
@ -89,17 +99,58 @@ impl Database for SqlitePool {
None
}
async fn migrate(&self, _params: Option<&ExtraDatabaseParams>) -> KResult<()> {
trace!("Migrate database");
// Get the context rows
match sqlx::query(get_sqlite_query!("select-context"))
.fetch_optional(&self.pool)
.await?
{
None => {
trace!("No context row found, migrating from scratch");
return migrate_(
&self.pool,
KMS_VERSION_BEFORE_MIGRATION_SUPPORT,
"insert-context",
)
.await;
}
Some(context_row) => {
let last_kms_version_run = context_row.get::<String, _>(0);
let state = context_row.get::<String, _>(1);
trace!(
"Context row found, migrating from version {last_kms_version_run} (state: \
{state})"
);
let current_kms_version = crate_version!();
debug!(
"[state={state}] Last KMS version run: {last_kms_version_run}, Current KMS \
version: {current_kms_version}"
);
if do_migration(&last_kms_version_run, current_kms_version, &state)? {
return migrate_(&self.pool, current_kms_version, "update-context").await;
}
}
}
Ok(())
}
async fn create(
&self,
uid: Option<String>,
user: &str,
owner: &str,
object: &Object,
attributes: &Attributes,
tags: &HashSet<String>,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<String> {
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = self.pool.begin().await?;
let uid = match create_(uid, user, object, attributes, tags, &mut tx).await {
let uid = match create_(uid, owner, object, attributes, tags, &mut tx).await {
Ok(uid) => uid,
Err(e) => {
tx.rollback().await.context("transaction failed")?;
@ -114,10 +165,10 @@ impl Database for SqlitePool {
&self,
uid_or_tags: &str,
user: &str,
operation_type: ObjectOperationType,
query_access_grant: ObjectOperationType,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<HashMap<String, ObjectWithMetadata>> {
retrieve_(uid_or_tags, user, operation_type, &self.pool).await
retrieve_(uid_or_tags, user, query_access_grant, &self.pool).await
}
async fn retrieve_tags(
@ -136,6 +187,10 @@ impl Database for SqlitePool {
tags: Option<&HashSet<String>>,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = self.pool.begin().await?;
match update_object_(uid, object, attributes, tags, &mut tx).await {
Ok(()) => {
@ -155,6 +210,10 @@ impl Database for SqlitePool {
state: StateEnumeration,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = self.pool.begin().await?;
match update_state_(uid, state, &mut tx).await {
Ok(()) => {
@ -178,6 +237,10 @@ impl Database for SqlitePool {
state: StateEnumeration,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = self.pool.begin().await?;
match upsert_(uid, user, object, attributes, tags, state, &mut tx).await {
Ok(()) => {
@ -197,6 +260,10 @@ impl Database for SqlitePool {
user: &str,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = self.pool.begin().await?;
match delete_(uid, user, &mut tx).await {
Ok(()) => {
@ -229,30 +296,38 @@ impl Database for SqlitePool {
async fn grant_access(
&self,
uid: &str,
userid: &str,
user: &str,
operation_types: HashSet<ObjectOperationType>,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
insert_access_(uid, userid, operation_types, &self.pool).await
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
insert_access_(uid, user, operation_types, &self.pool).await
}
async fn remove_access(
&self,
uid: &str,
userid: &str,
user: &str,
operation_types: HashSet<ObjectOperationType>,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
remove_access_(uid, userid, operation_types, &self.pool).await
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
remove_access_(uid, user, operation_types, &self.pool).await
}
async fn is_object_owned_by(
&self,
uid: &str,
userid: &str,
owner: &str,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<bool> {
is_object_owned_by_(uid, userid, &self.pool).await
is_object_owned_by_(uid, owner, &self.pool).await
}
async fn find(
@ -276,21 +351,25 @@ impl Database for SqlitePool {
async fn list_user_access_rights_on_object(
&self,
uid: &str,
userid: &str,
user: &str,
no_inherited_access: bool,
_params: Option<&ExtraDatabaseParams>,
) -> KResult<HashSet<ObjectOperationType>> {
list_user_access_rights_on_object_(uid, userid, no_inherited_access, &self.pool).await
list_user_access_rights_on_object_(uid, user, no_inherited_access, &self.pool).await
}
async fn atomic(
&self,
owner: &str,
user: &str,
operations: &[AtomicOperation],
_params: Option<&ExtraDatabaseParams>,
) -> KResult<()> {
if is_migration_in_progress_(&self.pool).await? {
kms_bail!("Migration in progress. Please retry later");
}
let mut tx = self.pool.begin().await?;
match atomic_(owner, operations, &mut tx).await {
match atomic_(user, operations, &mut tx).await {
Ok(()) => {
tx.commit().await?;
Ok(())
@ -325,30 +404,22 @@ pub(crate) async fn create_(
// If the uid is not provided, generate a new one
let uid = uid.unwrap_or_else(|| Uuid::new_v4().to_string());
sqlx::query(
SQLITE_QUERIES
.get("insert-objects")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid.clone())
.bind(object_json)
.bind(attributes_json)
.bind(StateEnumeration::Active.to_string())
.bind(owner)
.execute(&mut **executor)
.await?;
sqlx::query(get_sqlite_query!("insert-objects"))
.bind(uid.clone())
.bind(object_json)
.bind(attributes_json)
.bind(StateEnumeration::Active.to_string())
.bind(owner)
.execute(&mut **executor)
.await?;
// Insert the tags
for tag in tags {
sqlx::query(
SQLITE_QUERIES
.get("insert-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid.clone())
.bind(tag)
.execute(&mut **executor)
.await?;
sqlx::query(get_sqlite_query!("insert-tags"))
.bind(uid.clone())
.bind(tag)
.execute(&mut **executor)
.await?;
}
trace!("Created in DB: {uid} / {owner}");
@ -380,9 +451,7 @@ where
.join(", ");
// Build the raw SQL query
let raw_sql = SQLITE_QUERIES
.get("select-from-tags")
.context("SQL query can't be found")?
let raw_sql = get_sqlite_query!("select-from-tags")
.replace("@TAGS", &tags_params)
.replace("@LEN", &format!("${}", tags.len() + 1))
.replace("@USER", &format!("${}", tags.len() + 2));
@ -395,21 +464,17 @@ where
query = query.bind(tag);
}
// Bind the tags len and the user
query = query.bind(tags.len() as i16).bind(user);
query = query.bind(i16::try_from(tags.len())?).bind(user);
// Execute the query
query.fetch_all(executor).await?
} else {
sqlx::query(
SQLITE_QUERIES
.get("select-object")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid_or_tags)
.bind(user)
.fetch_optional(executor)
.await?
.map_or(vec![], |row| vec![row])
sqlx::query(get_sqlite_query!("select-object"))
.bind(uid_or_tags)
.bind(user)
.fetch_optional(executor)
.await?
.map_or(vec![], |row| vec![row])
};
// process the rows and find the tags
@ -448,14 +513,10 @@ pub(crate) async fn retrieve_tags_<'e, E>(uid: &str, executor: E) -> KResult<Has
where
E: Executor<'e, Database = Sqlite> + Copy,
{
let rows: Vec<SqliteRow> = sqlx::query(
SQLITE_QUERIES
.get("select-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.fetch_all(executor)
.await?;
let rows: Vec<SqliteRow> = sqlx::query(get_sqlite_query!("select-tags"))
.bind(uid)
.fetch_all(executor)
.await?;
let tags = rows.iter().map(|r| r.get(0)).collect::<HashSet<String>>();
@ -480,38 +541,26 @@ pub(crate) async fn update_object_(
.context("failed serializing the attributes to JSON")
.reason(ErrorReason::Internal_Server_Error)?;
sqlx::query(
SQLITE_QUERIES
.get("update-object-with-object")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(object_json)
.bind(attributes_json)
.bind(uid)
.execute(&mut **executor)
.await?;
sqlx::query(get_sqlite_query!("update-object-with-object"))
.bind(object_json)
.bind(attributes_json)
.bind(uid)
.execute(&mut **executor)
.await?;
// Insert the new tags if any
if let Some(tags) = tags {
// delete the existing tags
sqlx::query(
SQLITE_QUERIES
.get("delete-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.execute(&mut **executor)
.await?;
for tag in tags {
sqlx::query(
SQLITE_QUERIES
.get("insert-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
sqlx::query(get_sqlite_query!("delete-tags"))
.bind(uid)
.bind(tag)
.execute(&mut **executor)
.await?;
for tag in tags {
sqlx::query(get_sqlite_query!("insert-tags"))
.bind(uid)
.bind(tag)
.execute(&mut **executor)
.await?;
}
}
@ -524,15 +573,11 @@ pub(crate) async fn update_state_(
state: StateEnumeration,
executor: &mut Transaction<'_, Sqlite>,
) -> KResult<()> {
sqlx::query(
SQLITE_QUERIES
.get("update-object-with-state")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(state.to_string())
.bind(uid)
.execute(&mut **executor)
.await?;
sqlx::query(get_sqlite_query!("update-object-with-state"))
.bind(state.to_string())
.bind(uid)
.execute(&mut **executor)
.await?;
trace!("Updated in DB: {uid}");
Ok(())
}
@ -543,25 +588,17 @@ pub(crate) async fn delete_(
executor: &mut Transaction<'_, Sqlite>,
) -> KResult<()> {
// delete the object
sqlx::query(
SQLITE_QUERIES
.get("delete-object")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(owner)
.execute(&mut **executor)
.await?;
sqlx::query(get_sqlite_query!("delete-object"))
.bind(uid)
.bind(owner)
.execute(&mut **executor)
.await?;
// delete the tags
sqlx::query(
SQLITE_QUERIES
.get("delete-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.execute(&mut **executor)
.await?;
sqlx::query(get_sqlite_query!("delete-tags"))
.bind(uid)
.execute(&mut **executor)
.await?;
trace!("Deleted in DB: {uid}");
Ok(())
@ -587,41 +624,29 @@ pub(crate) async fn upsert_(
.context("failed serializing the attributes to JSON")
.reason(ErrorReason::Internal_Server_Error)?;
sqlx::query(
SQLITE_QUERIES
.get("upsert-object")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(object_json)
.bind(attributes_json)
.bind(state.to_string())
.bind(owner)
.execute(&mut **executor)
.await?;
sqlx::query(get_sqlite_query!("upsert-object"))
.bind(uid)
.bind(object_json)
.bind(attributes_json)
.bind(state.to_string())
.bind(owner)
.execute(&mut **executor)
.await?;
// Insert the new tags if present
if let Some(tags) = tags {
// delete the existing tags
sqlx::query(
SQLITE_QUERIES
.get("delete-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.execute(&mut **executor)
.await?;
// insert the new ones
for tag in tags {
sqlx::query(
SQLITE_QUERIES
.get("insert-tags")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
sqlx::query(get_sqlite_query!("delete-tags"))
.bind(uid)
.bind(tag)
.execute(&mut **executor)
.await?;
// insert the new ones
for tag in tags {
sqlx::query(get_sqlite_query!("insert-tags"))
.bind(uid)
.bind(tag)
.execute(&mut **executor)
.await?;
}
}
@ -637,14 +662,10 @@ where
E: Executor<'e, Database = Sqlite> + Copy,
{
debug!("Uid = {}", uid);
let list = sqlx::query(
SQLITE_QUERIES
.get("select-rows-read_access-with-object-id")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.fetch_all(executor)
.await?;
let list = sqlx::query(get_sqlite_query!("select-rows-read_access-with-object-id"))
.bind(uid)
.fetch_all(executor)
.await?;
let mut ids: HashMap<String, HashSet<ObjectOperationType>> = HashMap::with_capacity(list.len());
for row in list {
ids.insert(
@ -665,15 +686,11 @@ pub(crate) async fn list_user_granted_access_rights_<'e, E>(
where
E: Executor<'e, Database = Sqlite> + Copy,
{
debug!("Owner = {}", user);
let list = sqlx::query(
SQLITE_QUERIES
.get("select-objects-access-obtained")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(user)
.fetch_all(executor)
.await?;
debug!("user = {}", user);
let list = sqlx::query(get_sqlite_query!("select-objects-access-obtained"))
.bind(user)
.fetch_all(executor)
.await?;
let mut ids: HashMap<String, (String, StateEnumeration, HashSet<ObjectOperationType>)> =
HashMap::with_capacity(list.len());
for row in list {
@ -711,15 +728,11 @@ async fn perms<'e, E>(uid: &str, userid: &str, executor: E) -> KResult<HashSet<O
where
E: Executor<'e, Database = Sqlite> + Copy,
{
let row: Option<SqliteRow> = sqlx::query(
SQLITE_QUERIES
.get("select-user-accesses-for-object")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(userid)
.fetch_optional(executor)
.await?;
let row: Option<SqliteRow> = sqlx::query(get_sqlite_query!("select-user-accesses-for-object"))
.bind(uid)
.bind(userid)
.fetch_optional(executor)
.await?;
row.map_or(Ok(HashSet::<ObjectOperationType>::new()), |row| {
let perms_raw = row.get::<Vec<u8>, _>(0);
@ -753,16 +766,12 @@ where
.reason(ErrorReason::Internal_Server_Error)?;
// Upsert the DB
sqlx::query(
SQLITE_QUERIES
.get("upsert-row-read_access")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(userid)
.bind(json)
.execute(executor)
.await?;
sqlx::query(get_sqlite_query!("upsert-row-read_access"))
.bind(uid)
.bind(userid)
.bind(json)
.execute(executor)
.await?;
trace!("Insert read access right in DB: {uid} / {userid}");
Ok(())
}
@ -785,15 +794,11 @@ where
// No remaining permissions, delete the row
if perms.is_empty() {
sqlx::query(
SQLITE_QUERIES
.get("delete-rows-read_access")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(userid)
.execute(executor)
.await?;
sqlx::query(get_sqlite_query!("delete-rows-read_access"))
.bind(uid)
.bind(userid)
.execute(executor)
.await?;
return Ok(())
}
@ -803,16 +808,12 @@ where
.reason(ErrorReason::Internal_Server_Error)?;
// Update the DB
sqlx::query(
SQLITE_QUERIES
.get("update-rows-read_access-with-permission")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(userid)
.bind(json)
.execute(executor)
.await?;
sqlx::query(get_sqlite_query!("update-rows-read_access-with-permission"))
.bind(uid)
.bind(userid)
.bind(json)
.execute(executor)
.await?;
trace!("Deleted in DB: {uid} / {userid}");
Ok(())
}
@ -821,15 +822,11 @@ pub(crate) async fn is_object_owned_by_<'e, E>(uid: &str, owner: &str, executor:
where
E: Executor<'e, Database = Sqlite> + Copy,
{
let row: Option<SqliteRow> = sqlx::query(
SQLITE_QUERIES
.get("has-row-objects")
.ok_or_else(|| kms_error!("SQL query can't be found"))?,
)
.bind(uid)
.bind(owner)
.fetch_optional(executor)
.await?;
let row: Option<SqliteRow> = sqlx::query(get_sqlite_query!("has-row-objects"))
.bind(uid)
.bind(owner)
.fetch_optional(executor)
.await?;
Ok(row.is_some())
}
@ -885,30 +882,22 @@ pub(crate) async fn clear_database_<'e, E>(executor: E) -> KResult<()>
where
E: Executor<'e, Database = Sqlite> + Copy,
{
// Erase `context` table
sqlx::query(get_sqlite_query!("clean-table-context"))
.execute(executor)
.await?;
// Erase `objects` table
sqlx::query(
SQLITE_QUERIES
.get("clean-table-objects")
.expect("SQL query can't be found"),
)
.execute(executor)
.await?;
sqlx::query(get_sqlite_query!("clean-table-objects"))
.execute(executor)
.await?;
// Erase `read_access` table
sqlx::query(
SQLITE_QUERIES
.get("clean-table-read_access")
.expect("SQL query can't be found"),
)
.execute(executor)
.await?;
sqlx::query(get_sqlite_query!("clean-table-read_access"))
.execute(executor)
.await?;
// Erase `tags` table
sqlx::query(
SQLITE_QUERIES
.get("clean-table-tags")
.expect("SQL query can't be found"),
)
.execute(executor)
.await?;
sqlx::query(get_sqlite_query!("clean-table-tags"))
.execute(executor)
.await?;
Ok(())
}
@ -952,3 +941,140 @@ pub(crate) async fn atomic_(
}
Ok(())
}
pub(crate) async fn is_migration_in_progress_<'e, E>(executor: E) -> KResult<bool>
where
E: Executor<'e, Database = Sqlite> + Copy,
{
match sqlx::query(get_sqlite_query!("select-context"))
.fetch_optional(executor)
.await?
{
Some(context_row) => {
let state = context_row.get::<String, _>(1);
Ok(state == "upgrading")
}
None => Ok(false),
}
}
pub(crate) async fn migrate_(
executor: &Pool<Sqlite>,
last_version_run: &str,
query_name: &str,
) -> KResult<()> {
trace!("Set status to upgrading and last version run: {last_version_run}");
let upsert_context = get_sqlite_query!(query_name);
trace!("{query_name}: {upsert_context}");
match query_name {
"insert-context" => {
sqlx::query(upsert_context)
.bind(last_version_run)
.bind("upgrading")
.execute(executor)
.await
}
"update-context" => {
sqlx::query(upsert_context)
.bind(last_version_run)
.bind("upgrading")
.bind("upgrading")
.execute(executor)
.await
}
_ => kms_bail!("Unknown query name: {query_name}"),
}?;
trace!("Migrate data from version {last_version_run}");
// Process migration for each KMS version
let current_kms_version = crate_version!();
if last_version_run == KMS_VERSION_BEFORE_MIGRATION_SUPPORT {
migrate_from_4_12_0_to_4_13_0(executor).await?;
} else {
trace!("No migration needed between {last_version_run} and {current_kms_version}");
}
// Set the current running version
trace!("Set status to ready and last version run: {current_kms_version}");
sqlx::query(get_sqlite_query!("update-context"))
.bind(current_kms_version)
.bind("ready")
.bind("upgrading")
.execute(executor)
.await?;
Ok(())
}
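The `select-context`, `insert-context` and `update-context` statements are only referenced by name in `is_migration_in_progress_` and `migrate_`. Their exact SQL is not part of this excerpt; given the bind order (last version run, state, and, for the update, the expected previous state) and the column index read above, plausible shapes are:

```rust
// Assumptions only: the real statements are defined in the server's SQL query map,
// which is not shown in this diff excerpt.
const SELECT_CONTEXT: &str = "SELECT version, state FROM context";
const INSERT_CONTEXT: &str = "INSERT INTO context (version, state) VALUES (?1, ?2)";
const UPDATE_CONTEXT: &str = "UPDATE context SET version = ?1, state = ?2 WHERE state = ?3";
```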
/// Before version 4.13.0, the KMIP attributes were stored in the objects table (via the objects themselves).
/// The new `attributes` column allows the KMIP attributes to be stored in a dedicated column, even for KMIP objects that do not carry KMIP attributes themselves (such as Certificates).
pub(crate) async fn migrate_from_4_12_0_to_4_13_0(executor: &Pool<Sqlite>) -> KResult<()> {
trace!("Migrating from 4.12.0 to 4.13.0");
// Add the column attributes to the objects table
if (sqlx::query("SELECT attributes from objects")
.execute(executor)
.await)
.is_ok()
{
trace!("Column attributes already exists, nothing to do");
return Ok(());
}
trace!("Column attributes does not exist, adding it");
sqlx::query(get_sqlite_query!("add-column-attributes"))
.execute(executor)
.await?;
// Select all objects and extract the KMIP attributes to be stored in the new column
let rows = sqlx::query("SELECT * FROM objects")
.fetch_all(executor)
.await?;
let mut operations = Vec::with_capacity(rows.len());
for row in rows {
let uid = row.get::<String, _>(0);
let db_object: DBObject = serde_json::from_slice(&row.get::<Vec<u8>, _>(1))
.context("migrate: failed deserializing the object")
.reason(ErrorReason::Internal_Server_Error)?;
let object = Object::post_fix(db_object.object_type, db_object.object);
trace!(
"migrate_from_4_12_0_to_4_13_0: object (type: {})={:?}",
object.object_type(),
uid
);
let attributes = match object.attributes() {
Ok(attrs) => attrs.clone(),
Err(_error) => {
// For example, a Certificate object has no KMIP attributes
Attributes::default()
}
};
let tags = retrieve_tags_(&uid, executor).await?;
operations.push(AtomicOperation::UpdateObject((
uid,
object,
attributes,
Some(tags),
)));
}
let mut tx = executor.begin().await?;
match atomic_(
"this user is not used to update objects",
&operations,
&mut tx,
)
.await
{
Ok(()) => {
tx.commit().await?;
Ok(())
}
Err(e) => {
tx.rollback().await.context("transaction failed")?;
Err(e)
}
}
}
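Similarly, `add-column-attributes` is only referenced by name. Given the probe `SELECT attributes from objects` and the purpose of this migration, it is presumably a plain column addition along these lines (the real statement is not shown in this excerpt):

```rust
// Assumption: SQLite accepts ADD COLUMN without an explicit type affinity.
const ADD_COLUMN_ATTRIBUTES: &str = "ALTER TABLE objects ADD COLUMN attributes";
```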


@ -29,7 +29,7 @@ use crate::{
result::KResult,
};
struct DummyDB {}
struct DummyDB;
#[async_trait]
impl RemovedLocationsFinder for DummyDB {
async fn find_removed_locations(
@ -73,7 +73,7 @@ pub(crate) async fn test_objects_db() -> KResult<()> {
uid,
&RedisDbObject::new(
object.clone(),
"owner".to_string(),
"owner".to_owned(),
StateEnumeration::Active,
Some(HashSet::new()),
),
@ -127,8 +127,8 @@ pub(crate) async fn test_permissions_db() -> KResult<()> {
assert_eq!(permissions.len(), 1);
assert!(permissions.contains_key("O1"));
assert_eq!(
permissions.get("O1").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt])
permissions["O1"],
HashSet::from([ObjectOperationType::Encrypt])
);
//find the permission for the object O1
@ -138,8 +138,8 @@ pub(crate) async fn test_permissions_db() -> KResult<()> {
assert_eq!(permissions.len(), 1);
assert!(permissions.contains_key("U1"));
assert_eq!(
permissions.get("U1").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt])
permissions["U1"],
HashSet::from([ObjectOperationType::Encrypt])
);
// add the permission Decrypt to user U1 for object O1
@ -160,8 +160,8 @@ pub(crate) async fn test_permissions_db() -> KResult<()> {
assert_eq!(permissions.len(), 1);
assert!(permissions.contains_key("O1"));
assert_eq!(
permissions.get("O1").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt, ObjectOperationType::Decrypt])
permissions["O1"],
HashSet::from([ObjectOperationType::Encrypt, ObjectOperationType::Decrypt])
);
//find the permission for the object O1
@ -171,8 +171,8 @@ pub(crate) async fn test_permissions_db() -> KResult<()> {
assert_eq!(permissions.len(), 1);
assert!(permissions.contains_key("U1"));
assert_eq!(
permissions.get("U1").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt, ObjectOperationType::Decrypt])
permissions["U1"],
HashSet::from([ObjectOperationType::Encrypt, ObjectOperationType::Decrypt])
);
// the situation now is that we have
@ -194,8 +194,8 @@ pub(crate) async fn test_permissions_db() -> KResult<()> {
assert_eq!(permissions.len(), 1);
assert!(permissions.contains_key("O1"));
assert_eq!(
permissions.get("O1").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt])
permissions["O1"],
HashSet::from([ObjectOperationType::Encrypt])
);
//find the permission for the object O1
@ -205,13 +205,13 @@ pub(crate) async fn test_permissions_db() -> KResult<()> {
assert_eq!(permissions.len(), 2);
assert!(permissions.contains_key("U1"));
assert_eq!(
permissions.get("U1").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt, ObjectOperationType::Decrypt])
permissions["U1"],
HashSet::from([ObjectOperationType::Encrypt, ObjectOperationType::Decrypt])
);
assert!(permissions.contains_key("U2"));
assert_eq!(
permissions.get("U2").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt])
permissions["U2"],
HashSet::from([ObjectOperationType::Encrypt])
);
// the situation now is that we have
@ -234,13 +234,13 @@ pub(crate) async fn test_permissions_db() -> KResult<()> {
assert_eq!(permissions.len(), 2);
assert!(permissions.contains_key("O1"));
assert_eq!(
permissions.get("O1").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt])
permissions["O1"],
HashSet::from([ObjectOperationType::Encrypt])
);
assert!(permissions.contains_key("O2"));
assert_eq!(
permissions.get("O2").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt])
permissions["O2"],
HashSet::from([ObjectOperationType::Encrypt])
);
//find the permission for the object O2
@ -250,8 +250,8 @@ pub(crate) async fn test_permissions_db() -> KResult<()> {
assert_eq!(permissions.len(), 1);
assert!(permissions.contains_key("U2"));
assert_eq!(
permissions.get("U2").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt])
permissions["U2"],
HashSet::from([ObjectOperationType::Encrypt])
);
// the situation now is that we have
@ -275,8 +275,8 @@ pub(crate) async fn test_permissions_db() -> KResult<()> {
assert_eq!(permissions.len(), 1);
assert!(permissions.contains_key("O1"));
assert_eq!(
permissions.get("O1").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt])
permissions["O1"],
HashSet::from([ObjectOperationType::Encrypt])
);
//find the permission for the object O1
@ -286,13 +286,13 @@ pub(crate) async fn test_permissions_db() -> KResult<()> {
assert_eq!(permissions.len(), 2);
assert!(permissions.contains_key("U1"));
assert_eq!(
permissions.get("U1").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt])
permissions["U1"],
HashSet::from([ObjectOperationType::Encrypt])
);
assert!(permissions.contains_key("U2"));
assert_eq!(
permissions.get("U2").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt])
permissions["U2"],
HashSet::from([ObjectOperationType::Encrypt])
);
// let us remove the permission Encrypt on object O1 for user U1
@ -316,8 +316,8 @@ pub(crate) async fn test_permissions_db() -> KResult<()> {
assert_eq!(permissions.len(), 1);
assert!(permissions.contains_key("U2"));
assert_eq!(
permissions.get("U2").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt])
permissions["U2"],
HashSet::from([ObjectOperationType::Encrypt])
);
// let us remove the permission Encrypt on object O1 for user U2
@ -335,8 +335,8 @@ pub(crate) async fn test_permissions_db() -> KResult<()> {
assert_eq!(permissions.len(), 1);
assert!(permissions.contains_key("O2"));
assert_eq!(
permissions.get("O2").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt])
permissions["O2"],
HashSet::from([ObjectOperationType::Encrypt])
);
//find the permission for the object O1
@ -398,8 +398,8 @@ pub(crate) async fn test_corner_case() -> KResult<()> {
assert_eq!(permissions.len(), 1);
assert!(permissions.contains_key("O1"));
assert_eq!(
permissions.get("O1").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt])
permissions["O1"],
HashSet::from([ObjectOperationType::Encrypt])
);
// test there is one permission for object O1
@ -409,8 +409,8 @@ pub(crate) async fn test_corner_case() -> KResult<()> {
assert_eq!(permissions.len(), 1);
assert!(permissions.contains_key("U1"));
assert_eq!(
permissions.get("U1").unwrap(),
&HashSet::from([ObjectOperationType::Encrypt])
permissions["U1"],
HashSet::from([ObjectOperationType::Encrypt])
);
// remove the permission again


@ -11,6 +11,7 @@ use cosmian_kmip::{
},
};
use cosmian_kms_client::access::ObjectOperationType;
use cosmian_logger::log_utils::log_init;
use uuid::Uuid;
use crate::{
@ -26,7 +27,7 @@ use crate::{
pub(crate) async fn tx_and_list<DB: Database>(
db_and_params: &(DB, Option<ExtraDatabaseParams>),
) -> KResult<()> {
cosmian_logger::log_utils::log_init(None);
log_init(None);
let db = &db_and_params.0;
let db_params = db_and_params.1.as_ref();
@ -113,7 +114,7 @@ pub(crate) async fn tx_and_list<DB: Database>(
pub(crate) async fn atomic<DB: Database>(
db_and_params: &(DB, Option<ExtraDatabaseParams>),
) -> KResult<()> {
cosmian_logger::log_utils::log_init(None);
log_init(None);
let db = &db_and_params.0;
let db_params = db_and_params.1.as_ref();
@ -234,7 +235,7 @@ pub(crate) async fn atomic<DB: Database>(
pub(crate) async fn upsert<DB: Database>(
db_and_params: &(DB, Option<ExtraDatabaseParams>),
) -> KResult<()> {
cosmian_logger::log_utils::log_init(None);
log_init(None);
let db = &db_and_params.0;
let db_params = db_and_params.1.as_ref();
@ -277,7 +278,7 @@ pub(crate) async fn upsert<DB: Database>(
let attributes = symmetric_key.attributes_mut()?;
attributes.link = Some(vec![Link {
link_type: LinkType::PreviousLink,
linked_object_identifier: LinkedObjectIdentifier::TextString("foo".to_string()),
linked_object_identifier: LinkedObjectIdentifier::TextString("foo".to_owned()),
}]);
db.upsert(
@ -300,11 +301,15 @@ pub(crate) async fn upsert<DB: Database>(
1 => {
assert_eq!(StateEnumeration::PreActive, objs_[0].state);
assert_eq!(
objs_[0].object.attributes()?.link.as_ref().ok_or_else(
|| KmsError::ServerError("links should not be empty".to_string())
)?[0]
.linked_object_identifier,
LinkedObjectIdentifier::TextString("foo".to_string())
objs_[0]
.object
.attributes()?
.link
.as_ref()
.ok_or_else(|| KmsError::ServerError("links should not be empty".to_owned()))?
[0]
.linked_object_identifier,
LinkedObjectIdentifier::TextString("foo".to_owned())
);
}
_ => kms_bail!("There should be only one object"),
@ -326,7 +331,7 @@ pub(crate) async fn upsert<DB: Database>(
pub(crate) async fn crud<DB: Database>(
db_and_params: &(DB, Option<ExtraDatabaseParams>),
) -> KResult<()> {
cosmian_logger::log_utils::log_init(None);
log_init(None);
let db = &db_and_params.0;
let db_params = db_and_params.1.as_ref();
@ -385,7 +390,7 @@ pub(crate) async fn crud<DB: Database>(
let attributes = symmetric_key.attributes_mut()?;
attributes.link = Some(vec![Link {
link_type: LinkType::PreviousLink,
linked_object_identifier: LinkedObjectIdentifier::TextString("foo".to_string()),
linked_object_identifier: LinkedObjectIdentifier::TextString("foo".to_owned()),
}]);
db.update_object(
@ -407,11 +412,15 @@ pub(crate) async fn crud<DB: Database>(
1 => {
assert_eq!(StateEnumeration::Active, objs_[0].state);
assert_eq!(
objs_[0].object.attributes()?.link.as_ref().ok_or_else(
|| KmsError::ServerError("links should not be empty".to_string())
)?[0]
.linked_object_identifier,
LinkedObjectIdentifier::TextString("foo".to_string())
objs_[0]
.object
.attributes()?
.link
.as_ref()
.ok_or_else(|| KmsError::ServerError("links should not be empty".to_owned()))?
[0]
.linked_object_identifier,
LinkedObjectIdentifier::TextString("foo".to_owned())
);
}
_ => kms_bail!("There should be only one object"),


@ -46,7 +46,7 @@ pub(crate) async fn find_attributes<DB: Database>(
// Define the link vector
let link = vec![Link {
link_type: LinkType::ParentLink,
linked_object_identifier: LinkedObjectIdentifier::TextString("foo".to_string()),
linked_object_identifier: LinkedObjectIdentifier::TextString("foo".to_owned()),
}];
let attributes = symmetric_key.attributes_mut()?;
@ -77,7 +77,7 @@ pub(crate) async fn find_attributes<DB: Database>(
assert_eq!(&symmetric_key, &objs_[0].object);
assert_eq!(
objs_[0].object.attributes()?.link.as_ref().unwrap()[0].linked_object_identifier,
LinkedObjectIdentifier::TextString("foo".to_string())
LinkedObjectIdentifier::TextString("foo".to_owned())
);
}
_ => kms_bail!("There should be one object"),
@ -103,7 +103,7 @@ pub(crate) async fn find_attributes<DB: Database>(
// Define a link vector not present in any database objects
let link = vec![Link {
link_type: LinkType::ParentLink,
linked_object_identifier: LinkedObjectIdentifier::TextString("bar".to_string()),
linked_object_identifier: LinkedObjectIdentifier::TextString("bar".to_owned()),
}];
let researched_attributes = Some(Attributes {


@ -1,6 +1,10 @@
#![allow(clippy::unwrap_used, clippy::expect_used)]
use std::path::PathBuf;
use cosmian_kmip::crypto::{secret::Secret, symmetric::AES_256_GCM_KEY_LENGTH};
use cosmian_logger::log_utils::log_init;
use tempfile::TempDir;
use self::{
additional_redis_findex_tests::{test_corner_case, test_objects_db, test_permissions_db},
@ -35,7 +39,7 @@ fn get_redis_url() -> String {
if let Ok(var_env) = std::env::var("REDIS_HOST") {
format!("redis://{var_env}:6379")
} else {
"redis://localhost:6379".to_string()
"redis://localhost:6379".to_owned()
}
}
@ -131,8 +135,8 @@ pub(crate) async fn test_sql_cipher() -> KResult<()> {
#[tokio::test]
pub(crate) async fn test_sqlite() -> KResult<()> {
find_attributes(&get_sqlite().await?).await?;
json_access(&get_sqlite().await?).await?;
find_attributes(&get_sqlite().await?).await?;
owner(&get_sqlite().await?).await?;
permissions(&get_sqlite().await?).await?;
tags(&get_sqlite().await?, true).await?;
@ -159,14 +163,35 @@ pub(crate) async fn test_pgsql() -> KResult<()> {
#[tokio::test]
pub(crate) async fn test_mysql() -> KResult<()> {
crud(&get_mysql().await?).await?;
upsert(&get_mysql().await?).await?;
tx_and_list(&get_mysql().await?).await?;
atomic(&get_mysql().await?).await?;
log_init(None);
json_access(&get_mysql().await?).await?;
find_attributes(&get_mysql().await?).await?;
owner(&get_mysql().await?).await?;
permissions(&get_mysql().await?).await?;
tags(&get_mysql().await?, true).await?;
tx_and_list(&get_mysql().await?).await?;
atomic(&get_mysql().await?).await?;
upsert(&get_mysql().await?).await?;
crud(&get_mysql().await?).await?;
Ok(())
}
#[tokio::test]
pub(crate) async fn test_migrate_sqlite() -> KResult<()> {
log_init(None);
for sqlite_path in [
"src/tests/migrate/kms_4.12.0.sqlite",
"src/tests/migrate/kms_4.16.0.sqlite",
"src/tests/migrate/kms_4.17.0.sqlite",
] {
let tmp_dir = TempDir::new().unwrap();
let tmp_path = tmp_dir.path();
let tmp_file_path = tmp_path.join("kms.db");
if tmp_file_path.exists() {
std::fs::remove_file(&tmp_file_path)?;
}
std::fs::copy(sqlite_path, &tmp_file_path)?;
SqlitePool::instantiate(&tmp_file_path, false).await?;
}
Ok(())
}


@ -144,8 +144,8 @@ pub(crate) async fn owner<DB: Database>(
.list_user_granted_access_rights(user_id_2, db_params)
.await?;
assert_eq!(
objects.get(&uid).unwrap(),
&(
objects[&uid],
(
String::from(owner),
StateEnumeration::Active,
vec![ObjectOperationType::Get].into_iter().collect(),


@ -301,6 +301,7 @@ macro_rules! kms_bail {
};
}
#[allow(clippy::expect_used)]
#[cfg(test)]
mod tests {
use super::KmsError;
@ -312,16 +313,10 @@ mod tests {
assert_eq!("Unexpected server error: interpolate 42", err.to_string());
let err = bail();
assert_eq!(
"Unexpected server error: interpolate 43",
err.unwrap_err().to_string()
);
err.expect_err("Unexpected server error: interpolate 43");
let err = ensure();
assert_eq!(
"Unexpected server error: interpolate 44",
err.unwrap_err().to_string()
);
err.expect_err("Unexpected server error: interpolate 44");
}
fn bail() -> Result<(), KmsError> {


@ -171,6 +171,10 @@ async fn start_https_kms_server(
* Returns a `Result` type that contains a `Server` instance if successful, or an error if
* something went wrong.
*
* # Errors
*
* This function can return the following errors:
* - `KmsError::ServerError` - If there is an error in the server configuration or preparation.
*/
pub async fn prepare_kms_server(
kms_server: Arc<KMS>,
@ -227,7 +231,7 @@ pub async fn prepare_kms_server(
let google_cse_jwt_config = if enable_google_cse {
let Some(jwks_manager) = jwks_manager else {
return Err(KmsError::ServerError(
"No JWKS manager to handle Google CSE JWT authorization".to_string(),
"No JWKS manager to handle Google CSE JWT authorization".to_owned(),
));
};
Some(GoogleCseConfig {


@ -10,29 +10,46 @@
let_underscore,
rust_2024_compatibility,
unreachable_pub,
unused,
clippy::all,
clippy::suspicious,
clippy::complexity,
clippy::perf,
clippy::style,
clippy::pedantic,
clippy::cargo
clippy::cargo,
// restriction lints
clippy::unwrap_used,
clippy::get_unwrap,
clippy::expect_used,
// clippy::indexing_slicing,
clippy::unwrap_in_result,
clippy::assertions_on_result_states,
clippy::panic,
clippy::panic_in_result_fn,
clippy::renamed_function_params,
clippy::verbose_file_reads,
clippy::str_to_string,
clippy::string_to_string,
clippy::unreachable,
clippy::as_conversions,
clippy::print_stdout,
clippy::empty_structs_with_brackets,
clippy::unseparated_literal_suffix,
clippy::map_err_ignore,
)]
#![allow(
clippy::module_name_repetitions,
clippy::similar_names,
clippy::missing_errors_doc,
clippy::missing_panics_doc,
clippy::too_many_lines,
clippy::cast_possible_wrap,
clippy::cast_sign_loss,
clippy::cast_possible_truncation,
clippy::cargo_common_metadata,
clippy::multiple_crate_versions
)]
pub mod config;
pub mod core;
#[allow(clippy::expect_used)]
pub mod database;
pub mod error;
pub mod kms_server;
@ -43,5 +60,6 @@ pub mod telemetry;
pub use database::KMSServer;
#[allow(clippy::panic, clippy::unwrap_used, clippy::expect_used)]
#[cfg(test)]
mod tests;


@ -26,12 +26,8 @@ pub(crate) async fn manage_api_token_request<S, B>(
where
S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = Error>,
{
trace!("API Token Authentication...");
match manage_api_token(kms_server, &req).await {
Ok(()) => {
trace!("API Token Authentication successful");
Ok(service.call(req).await?.map_into_left_body())
}
Ok(()) => Ok(service.call(req).await?.map_into_left_body()),
Err(e) => {
error!("{:?} {} 401 unauthorized: {e:?}", req.method(), req.path(),);
Ok(req
@ -86,21 +82,15 @@ async fn get_api_token(kms_server: &Arc<KMS>, api_token_id: &str) -> KResult<Str
}
async fn manage_api_token(kms_server: Arc<KMS>, req: &ServiceRequest) -> KResult<()> {
trace!(
"Token authentication using this API token ID: {:?}",
kms_server.params.api_token_id
);
match &kms_server.params.api_token_id {
Some(api_token_id) => {
trace!("Token authentication using this API token ID: {api_token_id}");
let api_token = get_api_token(&kms_server, api_token_id.as_str()).await?;
let client_token = req
.headers()
.get(header::AUTHORIZATION)
.ok_or_else(|| {
KmsError::InvalidRequest("Missing Authorization header".to_string())
})?
.ok_or_else(|| KmsError::InvalidRequest("Missing Authorization header".to_owned()))?
.to_str()
.map_err(|e| {
KmsError::InvalidRequest(format!(
@ -131,7 +121,7 @@ async fn manage_api_token(kms_server: Arc<KMS>, req: &ServiceRequest) -> KResult
req.path(),
);
Err(KmsError::Unauthorized(
"Client and server authentication tokens mismatch".to_string(),
"Client and server authentication tokens mismatch".to_owned(),
))
}
}


@ -3,7 +3,7 @@ use std::{collections::HashMap, sync::RwLock};
use alcoholic_jwt::{JWK, JWKS};
use chrono::{DateTime, Duration, Utc};
use crate::result::KResult;
use crate::{error::KmsError, result::KResult};
static REFRESH_INTERVAL: i64 = 60; // in secs
@ -27,19 +27,23 @@ impl JwksManager {
}
/// Lock `jwks` to replace it
fn set_jwks(&self, new_jwks: HashMap<String, JWKS>) {
let mut jwks = self.jwks.write().expect("cannot lock JWKS for write");
fn set_jwks(&self, new_jwks: HashMap<String, JWKS>) -> KResult<()> {
let mut jwks = self.jwks.write().map_err(|e| {
KmsError::ServerError(format!("cannot lock JWKS for write. Error: {e:?}"))
})?;
*jwks = new_jwks;
Ok(())
}
/// Find the key identifier `kid` in each registered JWKS
pub fn find(&self, kid: &str) -> Option<JWK> {
self.jwks
pub fn find(&self, kid: &str) -> KResult<Option<JWK>> {
Ok(self
.jwks
.read()
.expect("cannot lock JWKS for read")
.map_err(|e| KmsError::ServerError(format!("cannot lock JWKS for read. Error: {e:?}")))?
.iter()
.find_map(|(_, jwks)| jwks.find(kid))
.cloned()
.cloned())
}
/// Fetch again all JWKS using the `uris`.
@ -47,10 +51,9 @@ impl JwksManager {
/// The threshold to refresh JWKS is set to `REFRESH_INTERVAL`.
pub async fn refresh(&self) -> KResult<()> {
let refresh_is_allowed = {
let mut last_update = self
.last_update
.write()
.expect("cannot lock last_update for write");
let mut last_update = self.last_update.write().map_err(|e| {
KmsError::ServerError(format!("cannot lock last_update for write. Error: {e:?}"))
})?;
let can_be_refreshed = last_update.map_or(true, |lu| {
(lu + Duration::seconds(REFRESH_INTERVAL)) < Utc::now()
@ -65,7 +68,7 @@ impl JwksManager {
if refresh_is_allowed {
tracing::info!("Refreshing JWKS");
let refreshed_jwks = Self::fetch_all(&self.uris).await;
self.set_jwks(refreshed_jwks);
self.set_jwks(refreshed_jwks)?;
}
Ok(())


@ -68,12 +68,12 @@ impl JwtConfig {
);
tracing::trace!(
"validating authentication token, expected JWT issuer: {}",
self.jwt_issuer_uri.to_string()
self.jwt_issuer_uri
);
let mut validations = vec![
#[cfg(not(test))]
alcoholic_jwt::Validation::Issuer(self.jwt_issuer_uri.to_string()),
alcoholic_jwt::Validation::Issuer(self.jwt_issuer_uri.clone()),
alcoholic_jwt::Validation::SubjectPresent,
#[cfg(not(feature = "insecure"))]
alcoholic_jwt::Validation::NotExpired,
@ -88,14 +88,14 @@ impl JwtConfig {
// needs to be fetched from the token headers.
let kid = token_kid(token)
.map_err(|e| KmsError::Unauthorized(format!("Failed to decode kid: {e}")))?
.ok_or_else(|| KmsError::Unauthorized("No 'kid' claim present in token".to_string()))?;
.ok_or_else(|| KmsError::Unauthorized("No 'kid' claim present in token".to_owned()))?;
tracing::trace!("looking for kid `{kid}` JWKS:\n{:?}", self.jwks);
let jwk = self
.jwks
.find(&kid)
.ok_or_else(|| KmsError::Unauthorized("Specified key not found in set".to_string()))?;
.find(&kid)?
.ok_or_else(|| KmsError::Unauthorized("Specified key not found in set".to_owned()))?;
tracing::trace!("JWK has been found:\n{jwk:?}");


@ -21,7 +21,7 @@ pub(crate) async fn manage_jwt_request<S, B>(
where
S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = Error>,
{
trace!("JWT Authentication...");
trace!("Starting JWT Authentication...");
match manage_jwt(configs, &req).await {
Ok(auth_claim) => {
req.extensions_mut().insert(auth_claim);


@ -51,7 +51,6 @@ where
type Transform = AuthMiddleware<S>;
fn new_transform(&self, service: S) -> Self::Future {
debug!("JWT/Token Authentication enabled");
ok(AuthMiddleware {
service: Rc::new(service),
jwt_configurations: self.jwt_configurations.clone(),
@ -76,8 +75,8 @@ where
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>>>>;
type Response = ServiceResponse<EitherBody<B, BoxBody>>;
fn poll_ready(&self, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(cx)
fn poll_ready(&self, ctx: &mut Context) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(ctx)
}
fn call(&self, req: ServiceRequest) -> Self::Future {


@ -97,9 +97,9 @@ where
type Response = ServiceResponse<EitherBody<B, BoxBody>>;
/// Poll the `SslAuthMiddleware` for readiness.
fn poll_ready(&self, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
fn poll_ready(&self, ctx: &mut Context) -> Poll<Result<(), Self::Error>> {
// Poll the underlying service for readiness.
self.service.poll_ready(cx)
self.service.poll_ready(ctx)
}
/// Call the `SslAuthMiddleware`.


@ -4,9 +4,27 @@ use crate::error::KmsError;
pub type KResult<R> = Result<R, KmsError>;
/// A helper trait for `KResult` that provides additional methods for error handling.
pub trait KResultHelper<T> {
/// Sets the reason for the error.
///
/// # Errors
///
/// Returns a `KResult` with the specified `ErrorReason` if the original result is an error.
fn reason(self, reason: ErrorReason) -> KResult<T>;
/// Sets the context for the error.
///
/// # Errors
///
/// Returns a `KResult` with the specified context if the original result is an error.
fn context(self, context: &str) -> KResult<T>;
/// Sets the context for the error using a closure.
///
/// # Errors
///
/// Returns a `KResult` with the context returned by the closure if the original result is an error.
fn with_context<O>(self, op: O) -> KResult<T>
where
O: FnOnce() -> String;
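For context, this is how the helpers documented above are chained elsewhere in this commit (for instance in the SQLite migration code). The snippet is an illustrative fragment only; it assumes the usual crate imports (`DBObject`, `ErrorReason`, `KResultHelper`) are in scope, and `raw` stands in for some serialized object bytes:

```rust
// Fragment for illustration, mirroring the usage in the migration code above.
let db_object: DBObject = serde_json::from_slice(raw)
    .context("failed deserializing the object")
    .reason(ErrorReason::Internal_Server_Error)?;
```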
@ -34,7 +52,7 @@ where
impl<T> KResultHelper<T> for Option<T> {
fn context(self, context: &str) -> KResult<T> {
self.ok_or_else(|| KmsError::ServerError(context.to_string()))
self.ok_or_else(|| KmsError::ServerError(context.to_owned()))
}
fn with_context<O>(self, op: O) -> KResult<T>


@ -47,8 +47,7 @@ fn jwt_authorization_config_application(
));
let jwt_audience = Some(
std::env::var("KMS_GOOGLE_CSE_AUDIENCE")
.unwrap_or_else(|_| "cse-authorization".to_string()),
std::env::var("KMS_GOOGLE_CSE_AUDIENCE").unwrap_or_else(|_| "cse-authorization".to_owned()),
);
Arc::new(JwtConfig {
@ -66,7 +65,7 @@ pub fn jwt_authorization_config(
.iter()
.map(|app| {
(
(*app).to_string(),
(*app).to_owned(),
jwt_authorization_config_application(app, jwks_manager.clone()),
)
})
@ -97,24 +96,26 @@ pub(crate) fn decode_jwt_authorization_token(
KmsError::ServerError(
"JWT audience should be configured with Google Workspace client-side \
encryption"
.to_string(),
.to_owned(),
)
})?
.to_string(),
),
alcoholic_jwt::Validation::Issuer(jwt_config.jwt_issuer_uri.to_string()),
alcoholic_jwt::Validation::Issuer(jwt_config.jwt_issuer_uri.clone()),
];
// If a JWKS contains multiple keys, the correct KID first
// needs to be fetched from the token headers.
let kid = token_kid(token)
.map_err(|_| KmsError::Unauthorized("Failed to decode token headers".to_string()))?
.ok_or_else(|| KmsError::Unauthorized("No 'kid' claim present in token".to_string()))?;
.map_err(|e| {
KmsError::Unauthorized(format!("Failed to decode token headers. Error: {e:?}"))
})?
.ok_or_else(|| KmsError::Unauthorized("No 'kid' claim present in token".to_owned()))?;
tracing::trace!("looking for kid `{kid}` JWKS:\n{:?}", jwt_config.jwks);
let jwk = &jwt_config.jwks.find(&kid).ok_or_else(|| {
KmsError::Unauthorized("[Google CSE auth] Specified key not found in set".to_string())
let jwk = &jwt_config.jwks.find(&kid)?.ok_or_else(|| {
KmsError::Unauthorized("[Google CSE auth] Specified key not found in set".to_owned())
})?;
tracing::trace!("JWK has been found:\n{jwk:?}");
@ -155,7 +156,7 @@ pub(crate) async fn validate_tokens(
let cse_config = cse_config.as_ref().ok_or_else(|| {
KmsError::ServerError(
"JWT authentication and authorization configurations for Google CSE are not set"
.to_string(),
.to_owned(),
)
})?;
@ -188,26 +189,26 @@ pub(crate) async fn validate_tokens(
// The emails should match (case insensitive)
let authentication_email = authentication_token.email.ok_or_else(|| {
KmsError::Unauthorized("Authentication token should contain an email".to_string())
KmsError::Unauthorized("Authentication token should contain an email".to_owned())
})?;
let authorization_email = authorization_token.email.ok_or_else(|| {
KmsError::Unauthorized("Authorization token should contain an email".to_string())
KmsError::Unauthorized("Authorization token should contain an email".to_owned())
})?;
kms_ensure!(
authorization_email == authentication_email,
KmsError::Unauthorized(
"Authentication and authorization emails in tokens do not match".to_string()
"Authentication and authorization emails in tokens do not match".to_owned()
)
);
if let Some(roles) = roles {
let role = authorization_token.role.ok_or_else(|| {
KmsError::Unauthorized("Authorization token should contain a role".to_string())
KmsError::Unauthorized("Authorization token should contain a role".to_owned())
})?;
kms_ensure!(
roles.contains(&role.as_str()),
KmsError::Unauthorized(
"Authorization token should contain a role of writer or owner".to_string()
"Authorization token should contain a role of writer or owner".to_owned()
)
);
}
@ -229,6 +230,7 @@ pub(crate) async fn validate_tokens(
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use std::sync::Arc;
use tracing::info;
@ -276,8 +278,8 @@ mod tests {
let client_id = std::env::var("TEST_GOOGLE_OAUTH_CLIENT_ID").unwrap();
// Test authentication
let jwt_authentication_config = JwtAuthConfig {
jwt_issuer_uri: Some(vec![JWT_ISSUER_URI.to_string()]),
jwks_uri: Some(vec![JWKS_URI.to_string()]),
jwt_issuer_uri: Some(vec![JWT_ISSUER_URI.to_owned()]),
jwks_uri: Some(vec![JWKS_URI.to_owned()]),
jwt_audience: Some(vec![client_id]),
};
let jwt_authentication_config = JwtConfig {
@ -292,17 +294,17 @@ mod tests {
info!("AUTHENTICATION token: {:?}", authentication_token);
assert_eq!(
authentication_token.iss,
Some("https://accounts.google.com".to_string())
Some("https://accounts.google.com".to_owned())
);
assert_eq!(
authentication_token.email,
Some("blue@cosmian.com".to_string())
Some("blue@cosmian.com".to_owned())
);
assert_eq!(
authentication_token.aud,
Some(
"764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com"
.to_string()
.to_owned()
)
);
@ -317,7 +319,7 @@ mod tests {
tracing::trace!("{jwt_authorization_config:#?}");
let (authorization_token, jwt_headers) = decode_jwt_authorization_token(
jwt_authorization_config.get("drive").unwrap(),
&jwt_authorization_config["drive"],
&wrap_request.authorization,
)
.unwrap();
@ -326,14 +328,14 @@ mod tests {
assert_eq!(
authorization_token.email,
Some("blue@cosmian.com".to_string())
Some("blue@cosmian.com".to_owned())
);
// prev: Some("cse-authorization".to_string())
// prev: Some("cse-authorization".to_owned())
assert_eq!(
authorization_token.aud,
Some(
"764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com"
.to_string()
.to_owned()
)
);
}


@ -29,7 +29,7 @@ impl CseErrorReply {
fn from(e: &KmsError) -> Self {
Self {
code: e.status_code().as_u16(),
message: "A CSE request to the Cosmian KMS failed".to_string(),
message: "A CSE request to the Cosmian KMS failed".to_owned(),
details: e.to_string(),
}
}


@ -41,26 +41,30 @@ pub struct StatusResponse {
pub operations_supported: Vec<String>,
}
/// Returns the status of the server.
///
/// # Returns
/// - `StatusResponse`: The status of the server.
#[must_use]
pub fn get_status() -> StatusResponse {
debug!("get_status");
StatusResponse {
server_type: "KACLS".to_string(),
vendor_id: "Cosmian".to_string(),
version: crate_version!().to_string(),
name: "Cosmian KMS".to_string(),
server_type: "KACLS".to_owned(),
vendor_id: "Cosmian".to_owned(),
version: crate_version!().to_owned(),
name: "Cosmian KMS".to_owned(),
operations_supported: vec![
"digest".to_string(),
"privatekeydecrypt".to_string(),
"privatekeysign".to_string(),
"privilegedprivatekeydecrypt".to_string(),
"privilegedunwrap".to_string(),
"privilegedwrap".to_string(),
"rewrap".to_string(),
"status".to_string(),
"unwrap".to_string(),
"wrap".to_string(),
"wrapprivatekey".to_string(),
"digest".to_owned(),
"privatekeydecrypt".to_owned(),
"privatekeysign".to_owned(),
"privilegedprivatekeydecrypt".to_owned(),
"privilegedunwrap".to_owned(),
"privilegedwrap".to_owned(),
"rewrap".to_owned(),
"status".to_owned(),
"unwrap".to_owned(),
"wrap".to_owned(),
"wrapprivatekey".to_owned(),
],
}
}
@ -78,10 +82,21 @@ pub struct WrapResponse {
pub wrapped_key: String,
}
/// Returns encrypted Data Encryption Key (DEK) and associated data.
/// Wraps a Data Encryption Key (DEK) using the specified authentication and authorization tokens.
///
/// See [doc](https://developers.google.com/workspace/cse/reference/wrap) and
/// for more details, see [Encrypt & decrypt data](https://developers.google.com/workspace/cse/guides/encrypt-and-decrypt-data)
/// # Arguments
/// - `req_http`: The HTTP request.
/// - `wrap_request`: The wrap request.
/// - `cse_config`: The Google CSE configuration.
/// - `kms`: The KMS server.
///
/// # Returns
/// - `WrapResponse`: The wrapped key.
///
/// # Errors
/// This function can return an error if there is a problem with the encryption process or if token validation fails.
pub async fn wrap(
req_http: HttpRequest,
wrap_request: WrapRequest,
@ -125,7 +140,7 @@ pub async fn wrap(
wrapping_method: kmip_types::WrappingMethod::Encrypt,
encoding_option: Some(EncodingOption::NoEncoding),
encryption_key_information: Some(kmip_types::EncryptionKeyInformation {
unique_identifier: UniqueIdentifier::TextString("[\"google_cse\"]".to_string()),
unique_identifier: UniqueIdentifier::TextString("[\"google_cse\"]".to_owned()),
cryptographic_parameters: Some(Box::default()),
}),
..Default::default()
@ -158,10 +173,21 @@ pub struct UnwrapResponse {
pub key: String,
}
/// Decrypt the Data Encryption Key (DEK) and associated data.
/// Unwraps a wrapped Data Encryption Key (DEK) using the specified authentication and authorization tokens.
///
/// See [doc](https://developers.google.com/workspace/cse/reference/wrap) and
/// for more details, see [Encrypt & decrypt data](https://developers.google.com/workspace/cse/guides/encrypt-and-decrypt-data)
/// # Arguments
/// - `req_http`: The HTTP request.
/// - `unwrap_request`: The unwrap request.
/// - `cse_config`: The Google CSE configuration.
/// - `kms`: The KMS server.
///
/// # Returns
/// - `UnwrapResponse`: The unwrapped key.
///
/// # Errors
/// This function can return an error if there is a problem with the decryption process or if token validation fails.
pub async fn unwrap(
req_http: HttpRequest,
unwrap_request: UnwrapRequest,
@ -201,7 +227,7 @@ pub async fn unwrap(
wrapped_dek.key_block_mut()?.key_wrapping_data = Some(Box::new(KeyWrappingData {
wrapping_method: kmip_types::WrappingMethod::Encrypt,
encryption_key_information: Some(kmip_types::EncryptionKeyInformation {
unique_identifier: UniqueIdentifier::TextString("[\"google_cse\"]".to_string()),
unique_identifier: UniqueIdentifier::TextString("[\"google_cse\"]".to_owned()),
cryptographic_parameters: None,
}),
encoding_option: Some(EncodingOption::NoEncoding),
@ -263,6 +289,17 @@ pub struct PrivateKeySignResponse {
/// See Google documentation:
/// - Private Key Sign endpoint: <https://developers.google.com/workspace/cse/reference/private-key-sign>
/// - S/MIME certificate profiles: <https://support.google.com/a/answer/7300887>
/// # Arguments
/// - `req_http`: The HTTP request.
/// - `request`: The private key sign request.
/// - `cse_config`: The Google CSE configuration.
/// - `kms`: The KMS server.
///
/// # Returns
/// - `PrivateKeySignResponse`: The signature.
///
/// # Errors
/// This function can return an error if there is a problem with the signing process or if token validation fails.
pub async fn private_key_sign(
req_http: HttpRequest,
request: PrivateKeySignRequest,
@ -351,6 +388,18 @@ pub struct PrivateKeyDecryptResponse {
///
/// See Google documentation:
/// - Private Key Decrypt endpoint: <https://developers.google.com/workspace/cse/reference/private-key-decrypt>
///
/// # Arguments
/// - `req_http`: The HTTP request.
/// - `request`: The private key decrypt request.
/// - `cse_config`: The Google CSE configuration.
/// - `kms`: The KMS server.
///
/// # Returns
/// - `PrivateKeyDecryptResponse`: The decrypted data encryption key.
///
/// # Errors
/// This function can return an error if there is a problem with the decryption process or if token validation fails.
pub async fn private_key_decrypt(
req_http: HttpRequest,
request: PrivateKeyDecryptRequest,
@ -443,7 +492,7 @@ async fn cse_symmetric_unwrap(
KeyWrappingData {
wrapping_method: kmip_types::WrappingMethod::Encrypt,
encryption_key_information: Some(kmip_types::EncryptionKeyInformation {
unique_identifier: UniqueIdentifier::TextString("google_cse".to_string()),
unique_identifier: UniqueIdentifier::TextString("google_cse".to_owned()),
cryptographic_parameters: None,
}),
encoding_option: Some(EncodingOption::TTLVEncoding),


@ -17,7 +17,7 @@ use crate::{
result::KResult,
};
/// Generate KMIP generic key pair
/// Generate KMIP JSON TTLV and send it to the KMIP server
#[post("/kmip/2_1")]
pub(crate) async fn kmip(
req_http: HttpRequest,


@ -77,5 +77,5 @@ pub(crate) async fn get_version(
kms: Data<Arc<KMSServer>>,
) -> KResult<Json<String>> {
info!("GET /version {}", kms.get_user(&req));
Ok(Json(crate_version!().to_string()))
Ok(Json(crate_version!().to_owned()))
}


@ -90,7 +90,7 @@ pub(crate) async fn version(
kms: Data<Arc<KMSServer>>,
) -> KResult<Json<String>> {
info!("GET /version {}", kms.get_user(&req_http));
Ok(Json(crate_version!().to_string()))
Ok(Json(crate_version!().to_owned()))
}
#[get("/{key_name}")]
@ -101,7 +101,7 @@ pub(crate) async fn get_key(
) -> HttpResponse {
let mut key_name = path.into_inner();
if key_name.is_empty() {
key_name = "dke_key".to_string();
"dke_key".clone_into(&mut key_name);
}
match _get_key(&key_name, req_http, &kms).await {
Ok(key_data) => {
@ -148,7 +148,7 @@ async fn _get_key(key_tag: &str, req_http: HttpRequest, kms: &Arc<KMSServer>) ->
is not supported"
)
})?;
let mut existing_path = dke_service_url.path().to_string();
let mut existing_path = dke_service_url.path().to_owned();
// remove the trailing / if any
if existing_path.ends_with('/') {
existing_path.pop();
@ -205,10 +205,7 @@ pub(crate) async fn decrypt(
kms: Data<Arc<KMSServer>>,
) -> HttpResponse {
let encrypted_data = wrap_request.into_inner();
info!(
"Encrypted Data : {}",
serde_json::to_string(&encrypted_data).unwrap()
);
info!("Encrypted Data : {encrypted_data:?}",);
let (key_name, key_id) = path.into_inner();
// let _key_id = key_id.into_inner();
trace!("POST /{}/{}/Decrypt {:?}", key_name, key_id, encrypted_data);
@ -255,13 +252,14 @@ fn big_uint_to_u32(bu: &BigUint) -> u32 {
let bytes = bu.to_bytes_be();
let len = bytes.len();
let min = std::cmp::min(4, len);
let mut padded = [0u8; 4];
let mut padded = [0_u8; 4];
padded[4 - min..].copy_from_slice(&bytes[len - min..]);
u32::from_be_bytes(padded)
}
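As a quick sanity check of the padding logic (illustration only, not part of the commit): the common RSA public exponent 65537 encodes as three big-endian bytes, which are right-aligned into a four-byte buffer, so values wider than four bytes keep only their low 32 bits.

```rust
// 65537 = 0x01_00_01 -> bytes [0x01, 0x00, 0x01] -> padded [0x00, 0x01, 0x00, 0x01]
assert_eq!(big_uint_to_u32(&BigUint::from(65_537_u32)), 65_537);
```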
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use chrono::{DateTime, Utc};
use num_bigint_dig::BigUint;


@ -20,6 +20,14 @@ pub struct TelemetryConfig {
}
/// Initialize the telemetry system
///
/// # Arguments
///
/// * `clap_config` - The `ClapConfig` object containing the telemetry configuration
///
/// # Errors
///
/// Returns an error if there is an issue initializing the telemetry system.
pub fn initialize_telemetry(clap_config: &ClapConfig) -> KResult<()> {
let config = &clap_config.telemetry;
let (filter, _reload_handle) =


@ -76,7 +76,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
let request = build_encryption_request(
public_key_unique_identifier,
Some(encryption_policy.to_string()),
Some(encryption_policy.to_owned()),
data.to_vec(),
Some(header_metadata.clone()),
Some(authentication_data.clone()),
@ -135,7 +135,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
let request = build_encryption_request(
public_key_unique_identifier,
Some(encryption_policy.to_string()),
Some(encryption_policy.to_owned()),
data.to_vec(),
None,
Some(authentication_data.clone()),
@ -224,7 +224,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
&app,
&Revoke {
unique_identifier: Some(UniqueIdentifier::TextString(
user_decryption_key_identifier_1.to_string(),
user_decryption_key_identifier_1.to_owned(),
)),
revocation_reason: RevocationReason::TextString("Revocation test".to_owned()),
compromise_occurrence_date: None,
@ -234,7 +234,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
//
// Rekey all key pairs with matching access policy
let ap_to_edit = "Department::MKG".to_string();
let ap_to_edit = "Department::MKG".to_owned();
let request = build_rekey_keypair_request(
private_key_unique_identifier,
RekeyEditAction::RekeyAccessPolicy(ap_to_edit.clone()),
@ -262,7 +262,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
let request = build_encryption_request(
public_key_unique_identifier,
Some(encryption_policy.to_string()),
Some(encryption_policy.to_owned()),
data.to_vec(),
None,
Some(authentication_data.clone()),
@ -286,7 +286,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
None,
);
let post_ttlv_decrypt: KResult<DecryptResponse> = test_utils::post(&app, &request).await;
assert!(post_ttlv_decrypt.is_err());
post_ttlv_decrypt.unwrap_err();
// decrypt
let request = build_decryption_request(
@ -316,7 +316,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
)?;
let rekey_keypair_response: KResult<ReKeyKeyPairResponse> =
test_utils::post(&app, &request).await;
assert!(rekey_keypair_response.is_ok());
rekey_keypair_response.unwrap();
// test user2 can no longer decrypt old message
let request = build_decryption_request(
@ -328,7 +328,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
None,
);
let post_ttlv_decrypt: KResult<DecryptResponse> = test_utils::post(&app, &request).await;
assert!(post_ttlv_decrypt.is_err());
post_ttlv_decrypt.unwrap_err();
//
// Add new Attributes
@ -348,7 +348,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
)?;
let rekey_keypair_response: KResult<ReKeyKeyPairResponse> =
test_utils::post(&app, &request).await;
assert!(rekey_keypair_response.is_ok());
rekey_keypair_response.unwrap();
// Encrypt for new attribute
let data = "New tech research data".as_bytes();
@ -356,7 +356,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
let request = build_encryption_request(
public_key_unique_identifier,
Some(encryption_policy.to_string()),
Some(encryption_policy.to_owned()),
data.to_vec(),
None,
Some(authentication_data.clone()),
@ -366,13 +366,13 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
}),
)?;
let encrypt_response: KResult<EncryptResponse> = test_utils::post(&app, &request).await;
assert!(encrypt_response.is_ok());
encrypt_response.unwrap();
//
// Rename Attributes
let rename_policy_attributes_pair = vec![(
Attribute::from(("Department", "HR")),
"HumanResources".to_string(),
"HumanResources".to_owned(),
)];
let request = build_rekey_keypair_request(
private_key_unique_identifier,
@ -380,7 +380,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
)?;
let rekey_keypair_response: KResult<ReKeyKeyPairResponse> =
test_utils::post(&app, &request).await;
assert!(rekey_keypair_response.is_ok());
rekey_keypair_response.unwrap();
// Encrypt for renamed attribute
let data = "hr data".as_bytes();
@ -388,7 +388,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
let request = build_encryption_request(
public_key_unique_identifier,
Some(encryption_policy.to_string()),
Some(encryption_policy.to_owned()),
data.to_vec(),
None,
Some(authentication_data.clone()),
@ -398,7 +398,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
}),
)?;
let encrypt_response: KResult<EncryptResponse> = test_utils::post(&app, &request).await;
assert!(encrypt_response.is_ok());
encrypt_response.unwrap();
//
// Disable ABE Attribute
@ -409,7 +409,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
)?;
let rekey_keypair_response: KResult<ReKeyKeyPairResponse> =
test_utils::post(&app, &request).await;
assert!(rekey_keypair_response.is_ok());
rekey_keypair_response.unwrap();
// Encrypt with disabled ABE attribute will fail
let authentication_data = b"cc the uid".to_vec();
@ -418,7 +418,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
let request = build_encryption_request(
public_key_unique_identifier,
Some(encryption_policy.to_string()),
Some(encryption_policy.to_owned()),
data.to_vec(),
None,
Some(authentication_data.clone()),
@ -428,7 +428,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
}),
)?;
let encrypt_response: KResult<EncryptResponse> = test_utils::post(&app, &request).await;
assert!(encrypt_response.is_err());
encrypt_response.unwrap_err();
//
// Delete attribute
@ -439,7 +439,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
)?;
let rekey_keypair_response: KResult<ReKeyKeyPairResponse> =
test_utils::post(&app, &request).await;
assert!(rekey_keypair_response.is_ok());
rekey_keypair_response.unwrap();
// Encrypt for removed attribute will fail
let data = "New hr data".as_bytes();
@ -447,7 +447,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
let request = build_encryption_request(
public_key_unique_identifier,
Some(encryption_policy.to_string()),
Some(encryption_policy.to_owned()),
data.to_vec(),
None,
Some(authentication_data.clone()),
@ -457,7 +457,7 @@ async fn integration_tests_use_ids_no_tags() -> KResult<()> {
}),
)?;
let encrypt_response: KResult<EncryptResponse> = test_utils::post(&app, &request).await;
assert!(encrypt_response.is_err());
encrypt_response.unwrap_err();
//
// Destroy user decryption key


@ -47,7 +47,7 @@ async fn test_re_key_with_tags() -> KResult<()> {
// Re_key all key pairs with matching access policy
let request = build_rekey_keypair_request(
&mkp_json_tag,
RekeyEditAction::RekeyAccessPolicy("Department::MKG".to_string()),
RekeyEditAction::RekeyAccessPolicy("Department::MKG".to_owned()),
)?;
let rekey_keypair_response: ReKeyKeyPairResponse = test_utils::post(&app, &request).await?;
assert_eq!(
@ -65,7 +65,7 @@ async fn test_re_key_with_tags() -> KResult<()> {
let encryption_policy = "Level::Confidential && Department::MKG";
let request = build_encryption_request(
&mkp_json_tag,
Some(encryption_policy.to_string()),
Some(encryption_policy.to_owned()),
data.to_vec(),
None,
Some(authentication_data.clone()),
@ -127,7 +127,7 @@ async fn integration_tests_with_tags() -> KResult<()> {
let request = build_encryption_request(
&mkp_json_tag,
Some(encryption_policy.to_string()),
Some(encryption_policy.to_owned()),
data.to_vec(),
Some(header_metadata.clone()),
Some(authentication_data.clone()),
@ -178,7 +178,7 @@ async fn integration_tests_with_tags() -> KResult<()> {
let request = build_encryption_request(
&mkp_json_tag,
Some(encryption_policy.to_string()),
Some(encryption_policy.to_owned()),
data.to_vec(),
None,
Some(authentication_data.clone()),
@ -253,7 +253,7 @@ async fn integration_tests_with_tags() -> KResult<()> {
let _revoke_response: RevokeResponse = test_utils::post(
&app,
&Revoke {
unique_identifier: Some(UniqueIdentifier::TextString(udk1_json_tag.to_string())),
unique_identifier: Some(UniqueIdentifier::TextString(udk1_json_tag.clone())),
revocation_reason: RevocationReason::TextString("Revocation test".to_owned()),
compromise_occurrence_date: None,
},
@ -264,7 +264,7 @@ async fn integration_tests_with_tags() -> KResult<()> {
// Rekey all key pairs with matching access policy
let request = build_rekey_keypair_request(
&mkp_json_tag,
RekeyEditAction::RekeyAccessPolicy("Department::MKG".to_string()),
RekeyEditAction::RekeyAccessPolicy("Department::MKG".to_owned()),
)?;
let rekey_keypair_response: ReKeyKeyPairResponse = test_utils::post(&app, &request).await?;
assert_eq!(
@ -282,7 +282,7 @@ async fn integration_tests_with_tags() -> KResult<()> {
let encryption_policy = "Level::Confidential && Department::MKG";
let request = build_encryption_request(
&mkp_json_tag,
Some(encryption_policy.to_string()),
Some(encryption_policy.to_owned()),
data.to_vec(),
None,
Some(authentication_data.clone()),
@ -303,7 +303,7 @@ async fn integration_tests_with_tags() -> KResult<()> {
None,
);
let post_ttlv_decrypt: KResult<DecryptResponse> = test_utils::post(&app, &request).await;
assert!(post_ttlv_decrypt.is_err());
post_ttlv_decrypt.unwrap_err();
// decrypt
let request = build_decryption_request(

Some files were not shown because too many files have changed in this diff.