diff --git a/CHANGELOG.md b/CHANGELOG.md index 68fbd577..24775dbb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,11 +11,19 @@ All notable changes to this project will be documented in this file. ### Changed - BREAKING: `configOverrides` now only accepts the supported config file names `runtime.properties`, `jvm.config` and `security.properties`. Previously arbitrary keys were silently accepted but ignored ([#813]). +- BREAKING: Implement generic database connection for metadata storage ([#814]). + Renamed CRD: `metadataStorageDatabase` -> `metadataDatabase`. + The `metadataDatabase` has subfields according to the supported db types: `postgresql`, `mysql` and `derby`. - Bump `stackable-operator` to 0.110.1 and `kube` to 3.1.0 ([#813]). - Document Helm deployed RBAC permissions and remove unnecessary permissions ([#810]). +### Removed + +- Removed all metadata storage related properties from product config ([#814]). + [#810]: https://github.com/stackabletech/druid-operator/pull/810 [#813]: https://github.com/stackabletech/druid-operator/pull/813 +[#814]: https://github.com/stackabletech/druid-operator/pull/814 ## [26.3.0] - 2026-03-16 diff --git a/Cargo.lock b/Cargo.lock index e2587fbc..1218e4ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1539,7 +1539,7 @@ dependencies = [ [[package]] name = "k8s-version" version = "0.1.3" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#96f42571ea185a3cd76fedde351fcabbeefcae16" +source = "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#dcd8e9934bedaa8eef911ed41f7ecc048a833412" dependencies = [ "darling", "regex", @@ -2934,7 +2934,7 @@ checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" [[package]] name = "stackable-certs" version = "0.4.0" -source = 
"git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#dcd8e9934bedaa8eef911ed41f7ecc048a833412" dependencies = [ "const-oid", "ecdsa", @@ -2984,7 +2984,7 @@ dependencies = [ [[package]] name = "stackable-operator" version = "0.110.1" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#96f42571ea185a3cd76fedde351fcabbeefcae16" +source = "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#dcd8e9934bedaa8eef911ed41f7ecc048a833412" dependencies = [ "base64", "clap", @@ -3025,7 +3025,7 @@ dependencies = [ [[package]] name = "stackable-operator-derive" version = "0.3.1" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#96f42571ea185a3cd76fedde351fcabbeefcae16" +source = "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#dcd8e9934bedaa8eef911ed41f7ecc048a833412" dependencies = [ "darling", "proc-macro2", @@ -3036,7 +3036,7 @@ dependencies = [ [[package]] name = "stackable-shared" version = "0.1.0" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#96f42571ea185a3cd76fedde351fcabbeefcae16" +source = "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#dcd8e9934bedaa8eef911ed41f7ecc048a833412" dependencies = [ "jiff", "k8s-openapi", @@ -3053,7 +3053,7 @@ dependencies = [ [[package]] name = "stackable-telemetry" version = "0.6.3" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#96f42571ea185a3cd76fedde351fcabbeefcae16" +source = "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#dcd8e9934bedaa8eef911ed41f7ecc048a833412" dependencies = [ "axum", "clap", @@ -3077,8 +3077,9 @@ dependencies = [ [[package]] name = "stackable-versioned" version = "0.9.0" -source = 
"git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#96f42571ea185a3cd76fedde351fcabbeefcae16" +source = "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#dcd8e9934bedaa8eef911ed41f7ecc048a833412" dependencies = [ + "kube", "schemars", "serde", "serde_json", @@ -3090,7 +3091,7 @@ dependencies = [ [[package]] name = "stackable-versioned-macros" version = "0.9.0" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#96f42571ea185a3cd76fedde351fcabbeefcae16" +source = "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#dcd8e9934bedaa8eef911ed41f7ecc048a833412" dependencies = [ "convert_case", "convert_case_extras", @@ -3108,7 +3109,7 @@ dependencies = [ [[package]] name = "stackable-webhook" version = "0.9.1" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#96f42571ea185a3cd76fedde351fcabbeefcae16" +source = "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#dcd8e9934bedaa8eef911ed41f7ecc048a833412" dependencies = [ "arc-swap", "async-trait", diff --git a/Cargo.nix b/Cargo.nix index 7b791636..d3146b8f 100644 --- a/Cargo.nix +++ b/Cargo.nix @@ -4889,9 +4889,9 @@ rec { edition = "2024"; workspace_member = null; src = pkgs.fetchgit { - url = "https://github.com/stackabletech/operator-rs.git"; - rev = "96f42571ea185a3cd76fedde351fcabbeefcae16"; - sha256 = "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + url = "https://github.com/stackabletech//operator-rs.git"; + rev = "dcd8e9934bedaa8eef911ed41f7ecc048a833412"; + sha256 = "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs"; }; libName = "k8s_version"; authors = [ @@ -9632,9 +9632,9 @@ rec { edition = "2024"; workspace_member = null; src = pkgs.fetchgit { - url = "https://github.com/stackabletech/operator-rs.git"; - rev = "96f42571ea185a3cd76fedde351fcabbeefcae16"; - sha256 = 
"0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + url = "https://github.com/stackabletech//operator-rs.git"; + rev = "dcd8e9934bedaa8eef911ed41f7ecc048a833412"; + sha256 = "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs"; }; libName = "stackable_certs"; authors = [ @@ -9844,9 +9844,9 @@ rec { edition = "2024"; workspace_member = null; src = pkgs.fetchgit { - url = "https://github.com/stackabletech/operator-rs.git"; - rev = "96f42571ea185a3cd76fedde351fcabbeefcae16"; - sha256 = "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + url = "https://github.com/stackabletech//operator-rs.git"; + rev = "dcd8e9934bedaa8eef911ed41f7ecc048a833412"; + sha256 = "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs"; }; libName = "stackable_operator"; authors = [ @@ -10024,9 +10024,9 @@ rec { edition = "2024"; workspace_member = null; src = pkgs.fetchgit { - url = "https://github.com/stackabletech/operator-rs.git"; - rev = "96f42571ea185a3cd76fedde351fcabbeefcae16"; - sha256 = "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + url = "https://github.com/stackabletech//operator-rs.git"; + rev = "dcd8e9934bedaa8eef911ed41f7ecc048a833412"; + sha256 = "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs"; }; procMacro = true; libName = "stackable_operator_derive"; @@ -10059,9 +10059,9 @@ rec { edition = "2024"; workspace_member = null; src = pkgs.fetchgit { - url = "https://github.com/stackabletech/operator-rs.git"; - rev = "96f42571ea185a3cd76fedde351fcabbeefcae16"; - sha256 = "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + url = "https://github.com/stackabletech//operator-rs.git"; + rev = "dcd8e9934bedaa8eef911ed41f7ecc048a833412"; + sha256 = "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs"; }; libName = "stackable_shared"; authors = [ @@ -10140,9 +10140,9 @@ rec { edition = "2024"; workspace_member = null; src = pkgs.fetchgit { - url = "https://github.com/stackabletech/operator-rs.git"; - rev = "96f42571ea185a3cd76fedde351fcabbeefcae16"; 
- sha256 = "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + url = "https://github.com/stackabletech//operator-rs.git"; + rev = "dcd8e9934bedaa8eef911ed41f7ecc048a833412"; + sha256 = "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs"; }; libName = "stackable_telemetry"; authors = [ @@ -10250,15 +10250,21 @@ rec { edition = "2024"; workspace_member = null; src = pkgs.fetchgit { - url = "https://github.com/stackabletech/operator-rs.git"; - rev = "96f42571ea185a3cd76fedde351fcabbeefcae16"; - sha256 = "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + url = "https://github.com/stackabletech//operator-rs.git"; + rev = "dcd8e9934bedaa8eef911ed41f7ecc048a833412"; + sha256 = "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs"; }; libName = "stackable_versioned"; authors = [ "Stackable GmbH " ]; dependencies = [ + { + name = "kube"; + packageId = "kube"; + usesDefaultFeatures = false; + features = [ "client" "jsonpatch" "runtime" "derive" "admission" "rustls-tls" "ring" ]; + } { name = "schemars"; packageId = "schemars"; @@ -10294,9 +10300,9 @@ rec { edition = "2024"; workspace_member = null; src = pkgs.fetchgit { - url = "https://github.com/stackabletech/operator-rs.git"; - rev = "96f42571ea185a3cd76fedde351fcabbeefcae16"; - sha256 = "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + url = "https://github.com/stackabletech//operator-rs.git"; + rev = "dcd8e9934bedaa8eef911ed41f7ecc048a833412"; + sha256 = "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs"; }; procMacro = true; libName = "stackable_versioned_macros"; @@ -10362,9 +10368,9 @@ rec { edition = "2024"; workspace_member = null; src = pkgs.fetchgit { - url = "https://github.com/stackabletech/operator-rs.git"; - rev = "96f42571ea185a3cd76fedde351fcabbeefcae16"; - sha256 = "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by"; + url = "https://github.com/stackabletech//operator-rs.git"; + rev = "dcd8e9934bedaa8eef911ed41f7ecc048a833412"; + sha256 = 
"19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs"; }; libName = "stackable_webhook"; authors = [ diff --git a/Cargo.toml b/Cargo.toml index dee8f8ca..e3f53167 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,4 +34,4 @@ tracing = "0.1" [patch."https://github.com/stackabletech/operator-rs.git"] # stackable-operator = { path = "../operator-rs/crates/stackable-operator" } -# stackable-operator = { git = "https://github.com/stackabletech//operator-rs.git", branch = "main" } +stackable-operator = { git = "https://github.com/stackabletech//operator-rs.git", branch = "feat/derby-host-part" } diff --git a/crate-hashes.json b/crate-hashes.json index e19b553d..5f02d593 100644 --- a/crate-hashes.json +++ b/crate-hashes.json @@ -1,12 +1,12 @@ { - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#k8s-version@0.1.3": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#stackable-certs@0.4.0": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#stackable-operator-derive@0.3.1": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#stackable-operator@0.110.1": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#stackable-shared@0.1.0": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#stackable-telemetry@0.6.3": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#stackable-versioned-macros@0.9.0": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", - 
"git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#stackable-versioned@0.9.0": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", - "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.110.1#stackable-webhook@0.9.1": "0d58yvxvy8hbai12bjhcyvh4zw182j5dsfyqja4k2xc1vzjy29by", + "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#k8s-version@0.1.3": "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs", + "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#stackable-certs@0.4.0": "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs", + "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#stackable-operator-derive@0.3.1": "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs", + "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#stackable-operator@0.110.1": "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs", + "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#stackable-shared@0.1.0": "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs", + "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#stackable-telemetry@0.6.3": "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs", + "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#stackable-versioned-macros@0.9.0": "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs", + "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#stackable-versioned@0.9.0": "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs", + "git+https://github.com/stackabletech//operator-rs.git?branch=feat%2Fderby-host-part#stackable-webhook@0.9.1": "19rv0g13a529yzk2fc0y3hxznhc2a1r1zfcpl6wx3w3m7cwrnhjs", "git+https://github.com/stackabletech/product-config.git?tag=0.8.0#product-config@0.8.0": 
"1dz70kapm2wdqcr7ndyjji0lhsl98bsq95gnb2lw487wf6yr7987" } \ No newline at end of file diff --git a/deploy/config-spec/properties.yaml b/deploy/config-spec/properties.yaml index 5b620e02..e4ede43b 100644 --- a/deploy/config-spec/properties.yaml +++ b/deploy/config-spec/properties.yaml @@ -1,3 +1,4 @@ +--- version: 0.1.0 spec: units: @@ -8,13 +9,15 @@ spec: - "/tmp/xyz" - unit: &unitPort name: "port" - regex: "^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$" + regex: "^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]\ + |6553[0-5])$" - unit: &unitPrometheusNamespace name: "prometheusNamespace" regex: "^[a-zA-Z_:][a-zA-Z0-9_:]*$" - unit: &unitDuration name: "duration" - regex: "^P(?!$)(\\d+Y)?(\\d+M)?(\\d+W)?(\\d+D)?(T(?=\\d)(\\d+H)?(\\d+M)?(\\d+S)?)?$" + regex: "^P(?!$)(\\d+Y)?(\\d+M)?(\\d+W)?(\\d+D)?(T(?=\\d)(\\d+H)?(\\d+M)?(\\d+S)\ + ?)?$" examples: - "PT300S" @@ -214,7 +217,6 @@ properties: comment: "TTL for host names that cannot be resolved." description: "TTL for host names that cannot be resolved." 
- - property: &plaintextPort propertyNames: - name: "druid.plaintextPort" @@ -418,139 +420,6 @@ properties: required: true asOfVersion: "0.0.0" - - property: &metadataStorageType - propertyNames: - - name: "druid.metadata.storage.type" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "string" - allowedValues: - - "mysql" - - "postgresql" - - "derby" - roles: - - name: "broker" - required: true - - name: "coordinator" - required: true - - name: "historical" - required: true - - name: "middlemanager" - required: true - - name: "router" - required: true - asOfVersion: "0.0.0" - - - property: &metadataStorageConnectURI - propertyNames: - - name: "druid.metadata.storage.connector.connectURI" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "string" - roles: - - name: "broker" - required: true - - name: "coordinator" - required: true - - name: "historical" - required: true - - name: "middlemanager" - required: true - - name: "router" - required: true - asOfVersion: "0.0.0" - - - property: &metadataStorageHost - propertyNames: - - name: "druid.metadata.storage.connector.host" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "string" - roles: - - name: "broker" - required: true - - name: "coordinator" - required: true - - name: "historical" - required: true - - name: "middlemanager" - required: true - - name: "router" - required: true - asOfVersion: "0.0.0" - - - property: &metadataStoragePort - propertyNames: - - name: "druid.metadata.storage.connector.port" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "integer" - min: "1024" - max: "65535" - unit: *unitPort - roles: - - name: "broker" - required: true - - name: "coordinator" - required: true - - name: "historical" - required: true - - name: "middlemanager" - required: true - - name: "router" - required: true - asOfVersion: "0.0.0" - - - property: &metadataStorageUser - propertyNames: - - name: 
"druid.metadata.storage.connector.user" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "string" - roles: - - name: "broker" - required: false - - name: "coordinator" - required: false - - name: "historical" - required: false - - name: "middlemanager" - required: false - - name: "router" - required: false - asOfVersion: "0.0.0" - - - property: &metadataStoragePassword - propertyNames: - - name: "druid.metadata.storage.connector.password" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "string" - roles: - - name: "broker" - required: false - - name: "coordinator" - required: false - - name: "historical" - required: false - - name: "middlemanager" - required: false - - name: "router" - required: false - asOfVersion: "0.0.0" - - property: &indexerLogsDirectory propertyNames: - name: "druid.indexer.logs.directory" @@ -661,7 +530,10 @@ properties: type: "string" defaultValues: - fromVersion: "0.0.0" - value: "-server -Xms256m -Xmx256m -XX:MaxDirectMemorySize=300m -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager" + value: "-server -Xms256m -Xmx256m -XX:MaxDirectMemorySize=300m + -Duser.timezone=UTC -Dfile.encoding=UTF-8 + -XX:+ExitOnOutOfMemoryError + -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager" roles: - name: "broker" required: false diff --git a/deploy/helm/chart_testing.yaml b/deploy/helm/chart_testing.yaml index 82b39c26..253af46d 100644 --- a/deploy/helm/chart_testing.yaml +++ b/deploy/helm/chart_testing.yaml @@ -1,3 +1,4 @@ +--- remote: origin target-branch: main chart-dirs: diff --git a/deploy/helm/druid-operator/configs/properties.yaml b/deploy/helm/druid-operator/configs/properties.yaml index 5b620e02..e4ede43b 100644 --- a/deploy/helm/druid-operator/configs/properties.yaml +++ b/deploy/helm/druid-operator/configs/properties.yaml @@ -1,3 +1,4 @@ +--- version: 0.1.0 spec: units: @@ -8,13 +9,15 @@ spec: - 
"/tmp/xyz" - unit: &unitPort name: "port" - regex: "^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$" + regex: "^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]\ + |6553[0-5])$" - unit: &unitPrometheusNamespace name: "prometheusNamespace" regex: "^[a-zA-Z_:][a-zA-Z0-9_:]*$" - unit: &unitDuration name: "duration" - regex: "^P(?!$)(\\d+Y)?(\\d+M)?(\\d+W)?(\\d+D)?(T(?=\\d)(\\d+H)?(\\d+M)?(\\d+S)?)?$" + regex: "^P(?!$)(\\d+Y)?(\\d+M)?(\\d+W)?(\\d+D)?(T(?=\\d)(\\d+H)?(\\d+M)?(\\d+S)\ + ?)?$" examples: - "PT300S" @@ -214,7 +217,6 @@ properties: comment: "TTL for host names that cannot be resolved." description: "TTL for host names that cannot be resolved." - - property: &plaintextPort propertyNames: - name: "druid.plaintextPort" @@ -418,139 +420,6 @@ properties: required: true asOfVersion: "0.0.0" - - property: &metadataStorageType - propertyNames: - - name: "druid.metadata.storage.type" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "string" - allowedValues: - - "mysql" - - "postgresql" - - "derby" - roles: - - name: "broker" - required: true - - name: "coordinator" - required: true - - name: "historical" - required: true - - name: "middlemanager" - required: true - - name: "router" - required: true - asOfVersion: "0.0.0" - - - property: &metadataStorageConnectURI - propertyNames: - - name: "druid.metadata.storage.connector.connectURI" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "string" - roles: - - name: "broker" - required: true - - name: "coordinator" - required: true - - name: "historical" - required: true - - name: "middlemanager" - required: true - - name: "router" - required: true - asOfVersion: "0.0.0" - - - property: &metadataStorageHost - propertyNames: - - name: "druid.metadata.storage.connector.host" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "string" - roles: - - name: "broker" - required: true - - name: "coordinator" 
- required: true - - name: "historical" - required: true - - name: "middlemanager" - required: true - - name: "router" - required: true - asOfVersion: "0.0.0" - - - property: &metadataStoragePort - propertyNames: - - name: "druid.metadata.storage.connector.port" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "integer" - min: "1024" - max: "65535" - unit: *unitPort - roles: - - name: "broker" - required: true - - name: "coordinator" - required: true - - name: "historical" - required: true - - name: "middlemanager" - required: true - - name: "router" - required: true - asOfVersion: "0.0.0" - - - property: &metadataStorageUser - propertyNames: - - name: "druid.metadata.storage.connector.user" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "string" - roles: - - name: "broker" - required: false - - name: "coordinator" - required: false - - name: "historical" - required: false - - name: "middlemanager" - required: false - - name: "router" - required: false - asOfVersion: "0.0.0" - - - property: &metadataStoragePassword - propertyNames: - - name: "druid.metadata.storage.connector.password" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "string" - roles: - - name: "broker" - required: false - - name: "coordinator" - required: false - - name: "historical" - required: false - - name: "middlemanager" - required: false - - name: "router" - required: false - asOfVersion: "0.0.0" - - property: &indexerLogsDirectory propertyNames: - name: "druid.indexer.logs.directory" @@ -661,7 +530,10 @@ properties: type: "string" defaultValues: - fromVersion: "0.0.0" - value: "-server -Xms256m -Xmx256m -XX:MaxDirectMemorySize=300m -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager" + value: "-server -Xms256m -Xmx256m -XX:MaxDirectMemorySize=300m + -Duser.timezone=UTC -Dfile.encoding=UTF-8 + -XX:+ExitOnOutOfMemoryError + 
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager" roles: - name: "broker" required: false diff --git a/dev-cluster.yml b/dev-cluster.yml index 01279c6f..513d9242 100644 --- a/dev-cluster.yml +++ b/dev-cluster.yml @@ -1,3 +1,4 @@ +--- # # Create a cluster: # @@ -10,13 +11,13 @@ kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 nodes: -- role: control-plane -- role: worker - kubeadmConfigPatches: - - | - kind: JoinConfiguration - nodeRegistration: - kubeletExtraArgs: - node-labels: "nodeType=druid-data" -- role: worker -- role: worker + - role: control-plane + - role: worker + kubeadmConfigPatches: + - | + kind: JoinConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "nodeType=druid-data" + - role: worker + - role: worker diff --git a/docs/modules/druid/examples/getting_started/druid.yaml b/docs/modules/druid/examples/getting_started/druid.yaml index 7106458b..7da5cf42 100644 --- a/docs/modules/druid/examples/getting_started/druid.yaml +++ b/docs/modules/druid/examples/getting_started/druid.yaml @@ -12,12 +12,11 @@ spec: hdfs: configMapName: simple-hdfs directory: /druid - metadataStorageDatabase: - dbType: postgresql - connString: jdbc:postgresql://postgresql-druid/druid - host: postgresql-druid - port: 5432 - credentialsSecret: druid-db-credentials + metadataDatabase: + postgresql: + host: postgresql-druid + database: druid + credentialsSecretName: druid-db-credentials brokers: roleGroups: default: diff --git a/docs/modules/druid/examples/getting_started/druid.yaml.j2 b/docs/modules/druid/examples/getting_started/druid.yaml.j2 index 7106458b..7da5cf42 100644 --- a/docs/modules/druid/examples/getting_started/druid.yaml.j2 +++ b/docs/modules/druid/examples/getting_started/druid.yaml.j2 @@ -12,12 +12,11 @@ spec: hdfs: configMapName: simple-hdfs directory: /druid - metadataStorageDatabase: - dbType: postgresql - connString: jdbc:postgresql://postgresql-druid/druid - host: postgresql-druid - port: 5432 - credentialsSecret: 
druid-db-credentials + metadataDatabase: + postgresql: + host: postgresql-druid + database: druid + credentialsSecretName: druid-db-credentials brokers: roleGroups: default: diff --git a/docs/modules/druid/pages/getting_started/first_steps.adoc b/docs/modules/druid/pages/getting_started/first_steps.adoc index 7a7db01a..f4c8cf7b 100644 --- a/docs/modules/druid/pages/getting_started/first_steps.adoc +++ b/docs/modules/druid/pages/getting_started/first_steps.adoc @@ -76,10 +76,6 @@ include::example$getting_started/getting_started.sh[tag=install-druid] This creates the actual Druid Stacklet. -WARNING: This Druid instance uses Derby (`dbType: derby`) as a metadata store, which is an interal SQL database. -It is not persisted and not suitable for production use! -Consult the https://druid.apache.org/docs/latest/dependencies/metadata-storage.html#available-metadata-stores[Druid documentation] for a list of supported databases and setup instructions for production instances. - == Verify that it works Submit an ingestion job and then query the ingested data -- either through the web interface or the API. diff --git a/extra/crds.yaml b/extra/crds.yaml index 959d6c31..0e61f44f 100644 --- a/extra/crds.yaml +++ b/extra/crds.yaml @@ -1453,44 +1453,109 @@ spec: type: string type: object type: object - metadataStorageDatabase: + metadataDatabase: description: Druid requires an SQL database to store metadata into. Specify connection information here. + oneOf: + - required: + - postgresql + - required: + - mysql + - required: + - derby properties: - connString: + derby: description: |- - The connect string for the database, for Postgres this could look like: - `jdbc:postgresql://postgresql-druid/druid` - type: string - credentialsSecret: - description: |- - A reference to a Secret containing the database credentials. - The Secret needs to contain the keys `username` and `password`. 
- nullable: true - type: string - dbType: + Connection settings for an embedded [Apache Derby](https://db.apache.org/derby/) database. + + Derby is an embedded, file-based Java database engine that requires no separate server process. + It is typically used for development, testing, or as a lightweight metastore backend (e.g. for + Apache Hive). + properties: + location: + description: |- + Path on the filesystem where Derby stores its database files. + + If not specified, defaults to `/tmp/derby/{unique_database_name}/derby.db`. + The `{unique_database_name}` part is automatically handled by the operator and is added to + prevent clashing database files. The `create=true` flag is always appended to the JDBC URL, + so the database is created automatically if it does not yet exist at this location. + nullable: true + type: string + type: object + mysql: description: |- - The database type. Supported values are: `derby`, `mysql` and `postgres`. - Note that a Derby database created locally in the container is not persisted! - Derby is not suitable for production use. - enum: - - derby - - mysql - - postgresql - type: string - host: - description: The host, i.e. `postgresql-druid`. - type: string - port: - description: The port, i.e. 5432 - format: uint16 - maximum: 65535.0 - minimum: 0.0 - type: integer - required: - - connString - - dbType - - host - - port + Connection settings for a [MySQL](https://www.mysql.com/) database. + + Please note that - due to license issues - we don't ship the mysql driver, you need to add + it yourself. + properties: + credentialsSecretName: + description: |- + Name of a Secret containing the `username` and `password` keys used to authenticate + against the MySQL server. + type: string + database: + description: Name of the database (schema) to connect to. + type: string + host: + description: Hostname or IP address of the MySQL server. 
+ type: string + parameters: + additionalProperties: + type: string + default: {} + description: |- + Additional map of connection parameters to append to the connection URL. The given + `HashMap` will be converted to query parameters in the form of + `?param1=value1¶m2=value2`. + type: object + port: + default: 3306 + description: Port the MySQL server is listening on. Defaults to `3306`. + format: uint16 + maximum: 65535.0 + minimum: 0.0 + type: integer + required: + - credentialsSecretName + - database + - host + type: object + postgresql: + description: Connection settings for a [PostgreSQL](https://www.postgresql.org/) database. + properties: + credentialsSecretName: + description: |- + Name of a Secret containing the `username` and `password` keys used to authenticate + against the PostgreSQL server. + type: string + database: + description: Name of the database (schema) to connect to. + type: string + host: + description: Hostname or IP address of the PostgreSQL server. + type: string + parameters: + additionalProperties: + type: string + default: {} + description: |- + Additional map of JDBC connection parameters to append to the connection URL. The given + `HashMap` will be converted to query parameters in the form of + `?param1=value1¶m2=value2`. + type: object + port: + default: 5432 + description: Port the PostgreSQL server is listening on. Defaults to `5432`. 
+ format: uint16 + maximum: 65535.0 + minimum: 0.0 + type: integer + required: + - credentialsSecretName + - database + - host + type: object type: object tls: default: @@ -1529,7 +1594,7 @@ spec: type: string required: - deepStorage - - metadataStorageDatabase + - metadataDatabase - zookeeperConfigMapName type: object clusterOperation: diff --git a/rust/operator-binary/src/config/jvm.rs b/rust/operator-binary/src/config/jvm.rs index 86b88be1..90f8ffa0 100644 --- a/rust/operator-binary/src/config/jvm.rs +++ b/rust/operator-binary/src/config/jvm.rs @@ -100,12 +100,11 @@ mod tests { hdfs: configMapName: simple-hdfs directory: /druid - metadataStorageDatabase: - dbType: postgresql - connString: jdbc:postgresql://druid-postgresql/druid - host: druid-postgresql - port: 5432 - credentialsSecret: mySecret + metadataDatabase: + postgresql: + host: druid-postgresql + database: druid + credentialsSecretName: mySecret zookeeperConfigMapName: simple-druid-znode brokers: roleGroups: @@ -187,12 +186,11 @@ mod tests { hdfs: configMapName: simple-hdfs directory: /druid - metadataStorageDatabase: - dbType: postgresql - connString: jdbc:postgresql://druid-postgresql/druid - host: druid-postgresql - port: 5432 - credentialsSecret: mySecret + metadataDatabase: + postgresql: + host: druid-postgresql + database: druid + credentialsSecretName: mySecret zookeeperConfigMapName: simple-druid-znode brokers: roleGroups: diff --git a/rust/operator-binary/src/crd/affinity.rs b/rust/operator-binary/src/crd/affinity.rs index e3d882df..23b61397 100644 --- a/rust/operator-binary/src/crd/affinity.rs +++ b/rust/operator-binary/src/crd/affinity.rs @@ -112,12 +112,11 @@ mod tests { hdfs: configMapName: simple-hdfs directory: /druid - metadataStorageDatabase: - dbType: postgresql - connString: jdbc:postgresql://druid-postgresql/druid - host: druid-postgresql - port: 5432 - credentialsSecret: mySecret + metadataDatabase: + postgresql: + host: druid-postgresql + database: druid + credentialsSecretName: 
mySecret zookeeperConfigMapName: simple-druid-znode brokers: roleGroups: diff --git a/rust/operator-binary/src/crd/authentication.rs b/rust/operator-binary/src/crd/authentication.rs index 1d1f1088..034723e5 100644 --- a/rust/operator-binary/src/crd/authentication.rs +++ b/rust/operator-binary/src/crd/authentication.rs @@ -258,11 +258,8 @@ deepStorage: hdfs: configMapName: druid-hdfs directory: /druid -metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 +metadataDatabase: + derby: {} zookeeperConfigMapName: zk-config-map "#; diff --git a/rust/operator-binary/src/crd/database.rs b/rust/operator-binary/src/crd/database.rs new file mode 100644 index 00000000..318433ae --- /dev/null +++ b/rust/operator-binary/src/crd/database.rs @@ -0,0 +1,119 @@ +use serde::{Deserialize, Serialize}; +use stackable_operator::{ + database_connections::{ + self, TemplatingMechanism, + databases::{ + derby::DerbyConnection, mysql::MysqlConnection, postgresql::PostgresqlConnection, + }, + drivers::jdbc::{JdbcDatabaseConnection, JdbcDatabaseConnectionDetails}, + }, + schemars::{self, JsonSchema}, +}; + +// metadata storage config properties +pub const METADATA_STORAGE_TYPE: &str = "druid.metadata.storage.type"; +pub const METADATA_STORAGE_CONNECTOR_CONNECT_URI: &str = + "druid.metadata.storage.connector.connectURI"; +pub const METADATA_STORAGE_USER: &str = "druid.metadata.storage.connector.user"; +pub const METADATA_STORAGE_PASSWORD: &str = "druid.metadata.storage.connector.password"; + +#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub enum MetadataDatabaseConnection { + // Docs are on the struct + Postgresql(PostgresqlConnection), + + /// Connection settings for a [MySQL](https://www.mysql.com/) database. + /// + /// Please note that - due to license issues - we don't ship the mysql driver, you need to add + /// it yourself. 
+ Mysql(MysqlConnection), + + // Docs are on the struct + Derby(DerbyConnection), + // We don't support generic as druid only offers the types mentioned above for metadata storage + // See +} + +impl MetadataDatabaseConnection { + /// Name of the database as it should be passed using `METADATA_STORAGE_TYPE` ("druid.metadata.storage.type") property. + pub fn as_db_type(&self) -> &str { + match self { + MetadataDatabaseConnection::Postgresql(_) => "postgresql", + MetadataDatabaseConnection::Mysql(_) => "mysql", + MetadataDatabaseConnection::Derby(_) => "derby", + } + } +} + +impl JdbcDatabaseConnection for MetadataDatabaseConnection { + /// We do *not* implement [`std::ops::Deref`]` for [`MetadataDatabaseConnection`], as we need + /// some special handling for Derby. + fn jdbc_connection_details_with_templating( + &self, + unique_database_name: &str, + templating_mechanism: &TemplatingMechanism, + ) -> Result { + match self { + Self::Postgresql(p) => p.jdbc_connection_details_with_templating( + unique_database_name, + templating_mechanism, + ), + Self::Mysql(m) => m.jdbc_connection_details_with_templating( + unique_database_name, + templating_mechanism, + ), + Self::Derby(d) => { + // According to the [Druid docs](https://druid.apache.org/docs/latest/design/metadata-storage/#derby) + // we should configure something like + // `jdbc:derby://localhost:1527//opt/var/druid_state/derby;create=true` + // instead of the usual `jdbc:derby:/opt/var/druid_state/derby;create=true`. + // + // It looks like Druid always starts Derby at `localhost:1527`, regardless of what we configure here, + // so we can hardcode it here. 
+ d.jdbc_connection_details_with_host_part(unique_database_name, "localhost:1527") + } + } + } +} + +#[cfg(test)] +mod tests { + use rstest::rstest; + use stackable_operator::utils::yaml_from_str_singleton_map; + + use super::*; + + #[rstest] + #[case::postgres( + "postgresql: + host: druid-postgresql + database: druid + credentialsSecretName: druid-credentials", + "jdbc:postgresql://druid-postgresql:5432/druid" + )] + #[case::derby( + "derby: {}", + "jdbc:derby://localhost:1527//tmp/derby/METADATA/derby.db;create=true" + )] + #[case::derby_custom_location( + "derby: + location: /user/provided.db", + "jdbc:derby://localhost:1527//user/provided.db;create=true" + )] + fn test_connection_url( + #[case] database_connection_yaml: &str, + #[case] expected_connection_url: &str, + ) { + let database_connection: MetadataDatabaseConnection = + yaml_from_str_singleton_map(database_connection_yaml).expect("invalid YAML"); + + let jdbc_connection_details = database_connection + .jdbc_connection_details("METADATA") + .expect("failed to get JDBC connection details"); + assert_eq!( + jdbc_connection_details.connection_url.as_str(), + expected_connection_url + ); + } +} diff --git a/rust/operator-binary/src/crd/mod.rs b/rust/operator-binary/src/crd/mod.rs index 4c202368..a0a51fde 100644 --- a/rust/operator-binary/src/crd/mod.rs +++ b/rust/operator-binary/src/crd/mod.rs @@ -46,6 +46,7 @@ use strum::{Display, EnumDiscriminants, EnumIter, EnumString, IntoStaticStr}; use crate::crd::{ affinity::get_affinity, authorization::DruidAuthorization, + database::MetadataDatabaseConnection, resource::RoleResource, tls::{DruidTls, default_druid_tls}, }; @@ -53,6 +54,7 @@ use crate::crd::{ pub mod affinity; pub mod authentication; pub mod authorization; +pub mod database; pub mod memory; pub mod resource; pub mod security; @@ -109,21 +111,12 @@ pub const AUTH_AUTHORIZERS_VALUE: &str = "[\"OpaAuthorizer\"]"; pub const AUTH_AUTHORIZER_OPA_TYPE: &str = "druid.auth.authorizer.OpaAuthorizer.type"; pub 
const AUTH_AUTHORIZER_OPA_TYPE_VALUE: &str = "opa"; pub const AUTH_AUTHORIZER_OPA_URI: &str = "druid.auth.authorizer.OpaAuthorizer.opaUri"; -// metadata storage config properties -const METADATA_STORAGE_TYPE: &str = "druid.metadata.storage.type"; -const METADATA_STORAGE_URI: &str = "druid.metadata.storage.connector.connectURI"; -const METADATA_STORAGE_HOST: &str = "druid.metadata.storage.connector.host"; -const METADATA_STORAGE_PORT: &str = "druid.metadata.storage.connector.port"; -const METADATA_STORAGE_USER: &str = "druid.metadata.storage.connector.user"; -const METADATA_STORAGE_PASSWORD: &str = "druid.metadata.storage.connector.password"; // indexer properties pub const INDEXER_JAVA_OPTS: &str = "druid.indexer.runner.javaOptsArray"; // historical settings pub const PROCESSING_BUFFER_SIZE_BYTES: &str = "druid.processing.buffer.sizeBytes"; pub const PROCESSING_NUM_MERGE_BUFFERS: &str = "druid.processing.numMergeBuffers"; pub const PROCESSING_NUM_THREADS: &str = "druid.processing.numThreads"; -// extra -pub const CREDENTIALS_SECRET_PROPERTY: &str = "credentialsSecret"; // logs pub const MAX_DRUID_LOG_FILES_SIZE: MemoryQuantity = MemoryQuantity { value: 10.0, @@ -136,10 +129,6 @@ pub const METRICS_PORT: u16 = 9090; pub const COOKIE_PASSPHRASE_ENV: &str = "OIDC_COOKIE_PASSPHRASE"; -// DB credentials - both of these are read from an env var by Druid with the ${env:...} syntax -pub const DB_USERNAME_ENV: &str = "DB_USERNAME_ENV"; -pub const DB_PASSWORD_ENV: &str = "DB_PASSWORD_ENV"; - // Graceful shutdown timeouts const DEFAULT_BROKER_GRACEFUL_SHUTDOWN_TIMEOUT: Duration = Duration::from_minutes_unchecked(5); const DEFAULT_COORDINATOR_GRACEFUL_SHUTDOWN_TIMEOUT: Duration = Duration::from_minutes_unchecked(5); @@ -361,7 +350,7 @@ pub mod versioned { pub ingestion: Option, /// Druid requires an SQL database to store metadata into. Specify connection information here. 
- pub metadata_storage_database: DatabaseConnectionSpec, + pub metadata_database: MetadataDatabaseConnection, /// TLS encryption settings for Druid, more information in the /// [security documentation](DOCS_BASE_URL_PLACEHOLDER/druid/usage-guide/security). @@ -420,34 +409,6 @@ impl v1alpha1::DruidCluster { match file { JVM_CONFIG => {} RUNTIME_PROPS => { - let mds = &self.spec.cluster_config.metadata_storage_database; - result.insert( - METADATA_STORAGE_TYPE.to_string(), - Some(mds.db_type.to_string()), - ); - result.insert( - METADATA_STORAGE_URI.to_string(), - Some(mds.conn_string.to_string()), - ); - result.insert( - METADATA_STORAGE_HOST.to_string(), - Some(mds.host.to_string()), - ); - result.insert( - METADATA_STORAGE_PORT.to_string(), - Some(mds.port.to_string()), - ); - if mds.credentials_secret.is_some() { - result.insert( - METADATA_STORAGE_USER.to_string(), - Some(format!("${{env:{DB_USERNAME_ENV}}}")), - ); - result.insert( - METADATA_STORAGE_PASSWORD.to_string(), - Some(format!("${{env:{DB_PASSWORD_ENV}}}")), - ); - } - // OPA if let Some(DruidAuthorization { opa: _ }) = &self.spec.cluster_config.authorization { @@ -835,6 +796,15 @@ impl v1alpha1::DruidCluster { } } +#[cfg(test)] +impl stackable_operator::versioned::test_utils::RoundtripTestData for v1alpha1::DruidClusterSpec { + fn roundtrip_test_data() -> Vec { + todo!( + "Return some test data according to https://stackabletech.github.io/operator-rs/stackable_versioned/test_utils/trait.RoundtripTestData.html" + ) + } +} + #[derive( Clone, Debug, @@ -1155,43 +1125,6 @@ impl DruidRole { } } -#[derive(Clone, Debug, Default, Deserialize, JsonSchema, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct DatabaseConnectionSpec { - /// The database type. Supported values are: `derby`, `mysql` and `postgres`. - /// Note that a Derby database created locally in the container is not persisted! - /// Derby is not suitable for production use. 
- pub db_type: DbType, - /// The connect string for the database, for Postgres this could look like: - /// `jdbc:postgresql://postgresql-druid/druid` - pub conn_string: String, - /// The host, i.e. `postgresql-druid`. - pub host: String, - /// The port, i.e. 5432 - pub port: u16, - /// A reference to a Secret containing the database credentials. - /// The Secret needs to contain the keys `username` and `password`. - pub credentials_secret: Option, -} - -#[derive( - Clone, Debug, Default, Deserialize, Eq, JsonSchema, PartialEq, Serialize, Display, EnumString, -)] -pub enum DbType { - #[serde(rename = "derby")] - #[strum(serialize = "derby")] - #[default] - Derby, - - #[serde(rename = "mysql")] - #[strum(serialize = "mysql")] - Mysql, - - #[serde(rename = "postgresql")] - #[strum(serialize = "postgresql")] - Postgresql, -} - #[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Eq, Serialize, Display)] #[serde(rename_all = "camelCase")] pub enum DeepStorageSpec { diff --git a/rust/operator-binary/src/crd/tls.rs b/rust/operator-binary/src/crd/tls.rs index b4fc25b3..c8011b22 100644 --- a/rust/operator-binary/src/crd/tls.rs +++ b/rust/operator-binary/src/crd/tls.rs @@ -40,11 +40,8 @@ deepStorage: hdfs: configMapName: druid-hdfs directory: /druid -metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 +metadataDatabase: + derby: {} zookeeperConfigMapName: zk-config-map "#; diff --git a/rust/operator-binary/src/druid_controller.rs b/rust/operator-binary/src/druid_controller.rs index 819665c2..9b039250 100644 --- a/rust/operator-binary/src/druid_controller.rs +++ b/rust/operator-binary/src/druid_controller.rs @@ -29,10 +29,10 @@ use stackable_operator::{ opa::OpaApiVersion, product_image_selection::{self, ResolvedProductImage}, rbac::build_rbac_resources, - tls_verification::TlsClientDetailsError, }, constants::RESTART_CONTROLLER_ENABLED_LABEL, crd::s3, + 
database_connections::drivers::jdbc::JdbcDatabaseConnection as _, k8s_openapi::{ DeepMerge, api::{ @@ -70,19 +70,18 @@ use crate::{ authentication::DruidAuthenticationConfig, config::jvm::construct_jvm_args, crd::{ - APP_NAME, AUTH_AUTHORIZER_OPA_URI, CREDENTIALS_SECRET_PROPERTY, CommonRoleGroupConfig, - Container, DB_PASSWORD_ENV, DB_USERNAME_ENV, DRUID_CONFIG_DIRECTORY, DS_BUCKET, - DeepStorageSpec, DruidClusterStatus, DruidRole, EXTENSIONS_LOADLIST, HDFS_CONFIG_DIRECTORY, - JVM_CONFIG, JVM_SECURITY_PROPERTIES_FILE, LOG_CONFIG_DIRECTORY, MAX_DRUID_LOG_FILES_SIZE, - METRICS_PORT, METRICS_PORT_NAME, OPERATOR_NAME, RUNTIME_PROPS, RW_CONFIG_DIRECTORY, - S3_ACCESS_KEY, S3_ENDPOINT_URL, S3_PATH_STYLE_ACCESS, S3_SECRET_KEY, STACKABLE_LOG_DIR, - ZOOKEEPER_CONNECTION_STRING, authentication::AuthenticationClassesResolved, - authorization::DruidAuthorization, build_recommended_labels, build_string_list, - security::DruidTlsSecurity, v1alpha1, + APP_NAME, AUTH_AUTHORIZER_OPA_URI, CommonRoleGroupConfig, Container, + DRUID_CONFIG_DIRECTORY, DS_BUCKET, DeepStorageSpec, DruidClusterStatus, DruidRole, + EXTENSIONS_LOADLIST, HDFS_CONFIG_DIRECTORY, JVM_CONFIG, JVM_SECURITY_PROPERTIES_FILE, + LOG_CONFIG_DIRECTORY, MAX_DRUID_LOG_FILES_SIZE, METRICS_PORT, METRICS_PORT_NAME, + OPERATOR_NAME, RUNTIME_PROPS, RW_CONFIG_DIRECTORY, S3_ACCESS_KEY, S3_ENDPOINT_URL, + S3_PATH_STYLE_ACCESS, S3_SECRET_KEY, STACKABLE_LOG_DIR, ZOOKEEPER_CONNECTION_STRING, + authentication::AuthenticationClassesResolved, authorization::DruidAuthorization, + build_recommended_labels, build_string_list, security::DruidTlsSecurity, v1alpha1, }, discovery::{self, build_discovery_configmaps}, extensions::get_extension_list, - internal_secret::{create_shared_internal_secret, env_var_from_secret}, + internal_secret::create_shared_internal_secret, listener::{ LISTENER_VOLUME_DIR, LISTENER_VOLUME_NAME, build_group_listener, build_group_listener_pvc, group_listener_name, secret_volume_listener_scope, @@ -114,11 +113,6 @@ pub 
struct Ctx { #[strum_discriminants(derive(IntoStaticStr))] #[allow(clippy::enum_variant_names)] pub enum Error { - #[snafu(display("failed to apply global Service"))] - ApplyRoleService { - source: stackable_operator::cluster_resources::Error, - }, - #[snafu(display("failed to apply Service for {}", rolegroup))] ApplyRoleGroupService { source: stackable_operator::cluster_resources::Error, @@ -184,9 +178,6 @@ pub enum Error { source: stackable_operator::crd::s3::v1alpha1::ConnectionError, }, - #[snafu(display("failed to configure S3 TLS client details"))] - ConfigureS3TlsClientDetails { source: TlsClientDetailsError }, - #[snafu(display("failed to get deep storage bucket"))] GetDeepStorageBucket { source: stackable_operator::crd::s3::v1alpha1::BucketError, @@ -320,16 +311,6 @@ pub enum Error { source: crate::operations::graceful_shutdown::Error, }, - #[snafu(display("failed to build TLS certificate SecretClass Volume"))] - TlsCertSecretClassVolumeBuild { - source: stackable_operator::builder::pod::volume::SecretOperatorVolumeSourceBuilderError, - }, - - #[snafu(display("failed to build S3 credentials SecretClass Volume"))] - S3CredentialsSecretClassVolumeBuild { - source: stackable_operator::commons::secret_class::SecretClassVolumeError, - }, - #[snafu(display("failed to add OIDC Volumes and VolumeMounts to the Pod and containers"))] AuthVolumesBuild { source: crate::authentication::Error, @@ -384,6 +365,11 @@ pub enum Error { ResolveProductImage { source: product_image_selection::Error, }, + + #[snafu(display("invalid metadata database connection"))] + InvalidMetadataDatabaseConnection { + source: stackable_operator::database_connections::Error, + }, } type Result = std::result::Result; @@ -726,6 +712,12 @@ fn build_rolegroup_config_map( })?; let role = druid.get_role(&druid_role); let mut cm_conf_data = BTreeMap::new(); // filename -> filecontent + let metadata_database_connection_details = druid + .spec + .cluster_config + .metadata_database + 
.jdbc_connection_details("metadata") + .context(InvalidMetadataDatabaseConnectionSnafu)?; for (property_name_kind, config) in rolegroup_config { let mut conf: BTreeMap> = Default::default(); @@ -764,6 +756,49 @@ fn build_rolegroup_config_map( ); }; + conf.insert( + crate::crd::database::METADATA_STORAGE_TYPE.to_string(), + Some( + druid + .spec + .cluster_config + .metadata_database + .as_db_type() + .to_string(), + ), + ); + + conf.insert( + crate::crd::database::METADATA_STORAGE_CONNECTOR_CONNECT_URI.to_string(), + Some( + metadata_database_connection_details + .connection_url + .to_string(), + ), + ); + + if let Some(EnvVar { + name: username_env_name, + .. + }) = &metadata_database_connection_details.username_env + { + conf.insert( + crate::crd::database::METADATA_STORAGE_USER.to_string(), + Some(format!("${{env:{username_env_name}}}",)), + ); + } + + if let Some(EnvVar { + name: password_env_name, + .. + }) = &metadata_database_connection_details.password_env + { + conf.insert( + crate::crd::database::METADATA_STORAGE_PASSWORD.to_string(), + Some(format!("${{env:{password_env_name}}}",)), + ); + } + if let Some(s3) = s3_conn { if !s3.region.is_default_config() { // Raising this as warning instead of returning an error, better safe than sorry. 
@@ -940,12 +975,13 @@ fn build_rolegroup_statefulset( ) .context(GracefulShutdownSnafu)?; - let credentials_secret = druid + let metadata_database_connection_details = druid .spec .cluster_config - .metadata_storage_database - .credentials_secret - .as_ref(); + .metadata_database + .jdbc_connection_details("metadata") + .context(InvalidMetadataDatabaseConnectionSnafu)?; + let mut main_container_commands = role.main_container_prepare_commands(s3_conn); let mut prepare_container_commands = vec![]; if let Some(ContainerLogConfig { @@ -1030,12 +1066,13 @@ fn build_rolegroup_statefulset( .build(), ); + metadata_database_connection_details.add_to_container(&mut cb_druid); + // rest of env let mut rest_env = rolegroup_config .get(&PropertyNameKind::Env) .iter() .flat_map(|env_vars| env_vars.iter()) - .filter(|(k, _)| k != &&CREDENTIALS_SECRET_PROPERTY.to_string()) .map(|(k, v)| EnvVar { name: k.clone(), value: Some(v.clone()), @@ -1043,21 +1080,6 @@ fn build_rolegroup_statefulset( }) .collect::>(); - // load database credentials to environment variables: these will be used to replace - // the placeholders in runtime.properties so that the operator does not "touch" the secret. 
- if let Some(credentials_secret_name) = credentials_secret { - rest_env.push(env_var_from_secret( - credentials_secret_name, - Some("username"), - DB_USERNAME_ENV, - )); - rest_env.push(env_var_from_secret( - credentials_secret_name, - Some("password"), - DB_PASSWORD_ENV, - )); - } - if let Some(auth_config) = druid_auth_config { rest_env.extend(auth_config.get_env_var_mounts(druid, role)) } diff --git a/rust/operator-binary/src/extensions.rs b/rust/operator-binary/src/extensions.rs index 209a3948..8b39752f 100644 --- a/rust/operator-binary/src/extensions.rs +++ b/rust/operator-binary/src/extensions.rs @@ -4,7 +4,7 @@ use tracing::debug; use crate::{ authentication::DruidAuthenticationConfig, - crd::{DbType, security::DruidTlsSecurity, v1alpha1}, + crd::{database::MetadataDatabaseConnection, security::DruidTlsSecurity, v1alpha1}, }; const EXT_S3: &str = "druid-s3-extensions"; @@ -33,12 +33,12 @@ pub fn get_extension_list( EXT_HDFS.to_string(), ]); - match druid.spec.cluster_config.metadata_storage_database.db_type { - DbType::Derby => {} // no additional extensions required - DbType::Postgresql => { + match druid.spec.cluster_config.metadata_database { + MetadataDatabaseConnection::Derby(..) => {} // no additional extensions required + MetadataDatabaseConnection::Postgresql(..) => { extensions.insert(EXT_PSQL_MD_ST.to_string()); } - DbType::Mysql => { + MetadataDatabaseConnection::Mysql(..) 
=> { extensions.insert(EXT_MYSQL_MD_ST.to_string()); } }; diff --git a/rust/operator-binary/test/resources/crd/resource_merge/druid_cluster.yaml b/rust/operator-binary/test/resources/crd/resource_merge/druid_cluster.yaml index 8b75a020..b0811a90 100644 --- a/rust/operator-binary/test/resources/crd/resource_merge/druid_cluster.yaml +++ b/rust/operator-binary/test/resources/crd/resource_merge/druid_cluster.yaml @@ -13,12 +13,11 @@ spec: hdfs: configMapName: simple-hdfs directory: /druid - metadataStorageDatabase: - dbType: postgresql - connString: jdbc:postgresql://druid-postgresql/druid - host: druid-postgresql - port: 5432 - credentialsSecret: mySecret + metadataDatabase: + postgresql: + host: druid-postgresql + database: druid + credentialsSecretName: mySecret zookeeperConfigMapName: psql-druid-znode brokers: roleGroups: diff --git a/rust/operator-binary/test/resources/crd/resource_merge/segment_cache.yaml b/rust/operator-binary/test/resources/crd/resource_merge/segment_cache.yaml index adc53f2d..bab1a998 100644 --- a/rust/operator-binary/test/resources/crd/resource_merge/segment_cache.yaml +++ b/rust/operator-binary/test/resources/crd/resource_merge/segment_cache.yaml @@ -13,12 +13,11 @@ spec: hdfs: configMapName: simple-hdfs directory: /druid - metadataStorageDatabase: - dbType: postgresql - connString: jdbc:postgresql://druid-postgresql/druid - host: druid-postgresql - port: 5432 - credentialsSecret: mySecret + metadataDatabase: + postgresql: + host: druid-postgresql + database: druid + credentialsSecretName: mySecret zookeeperConfigMapName: psql-druid-znode brokers: roleGroups: diff --git a/rust/operator-binary/test/resources/crd/role_service/druid_cluster.yaml b/rust/operator-binary/test/resources/crd/role_service/druid_cluster.yaml index ffb9a6a9..507870c1 100644 --- a/rust/operator-binary/test/resources/crd/role_service/druid_cluster.yaml +++ b/rust/operator-binary/test/resources/crd/role_service/druid_cluster.yaml @@ -13,12 +13,11 @@ spec: hdfs: 
configMapName: simple-hdfs directory: /druid - metadataStorageDatabase: - dbType: postgresql - connString: jdbc:postgresql://druid-postgresql/druid - host: druid-postgresql - port: 5432 - credentialsSecret: mySecret + metadataDatabase: + postgresql: + host: druid-postgresql + database: druid + credentialsSecretName: mySecret zookeeperConfigMapName: psql-druid-znode brokers: roleGroups: diff --git a/rust/operator-binary/test/resources/druid_controller/properties.yaml b/rust/operator-binary/test/resources/druid_controller/properties.yaml index 2fb07f49..e1e167d9 100644 --- a/rust/operator-binary/test/resources/druid_controller/properties.yaml +++ b/rust/operator-binary/test/resources/druid_controller/properties.yaml @@ -9,13 +9,15 @@ spec: - "/tmp/xyz" - unit: &unitPort name: "port" - regex: "^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$" + regex: "^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]\ + |6553[0-5])$" - unit: &unitPrometheusNamespace name: "prometheusNamespace" regex: "^[a-zA-Z_:][a-zA-Z0-9_:]*$" - unit: &unitDuration name: "duration" - regex: "^P(?!$)(\\d+Y)?(\\d+M)?(\\d+W)?(\\d+D)?(T(?=\\d)(\\d+H)?(\\d+M)?(\\d+S)?)?$" + regex: "^P(?!$)(\\d+Y)?(\\d+M)?(\\d+W)?(\\d+D)?(T(?=\\d)(\\d+H)?(\\d+M)?(\\d+S)\ + ?)?$" examples: - "PT300S" @@ -229,139 +231,6 @@ properties: required: true asOfVersion: "0.0.0" - - property: &metadataStorageType - propertyNames: - - name: "druid.metadata.storage.type" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "string" - allowedValues: - - "mysql" - - "postgresql" - - "derby" - roles: - - name: "broker" - required: true - - name: "coordinator" - required: true - - name: "historical" - required: true - - name: "middlemanager" - required: true - - name: "router" - required: true - asOfVersion: "0.0.0" - - - property: &metadataStorageConnectURI - propertyNames: - - name: "druid.metadata.storage.connector.connectURI" - kind: - type: "file" - file: 
"runtime.properties" - datatype: - type: "string" - roles: - - name: "broker" - required: true - - name: "coordinator" - required: true - - name: "historical" - required: true - - name: "middlemanager" - required: true - - name: "router" - required: true - asOfVersion: "0.0.0" - - - property: &metadataStorageHost - propertyNames: - - name: "druid.metadata.storage.connector.host" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "string" - roles: - - name: "broker" - required: true - - name: "coordinator" - required: true - - name: "historical" - required: true - - name: "middlemanager" - required: true - - name: "router" - required: true - asOfVersion: "0.0.0" - - - property: &metadataStoragePort - propertyNames: - - name: "druid.metadata.storage.connector.port" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "integer" - min: "1024" - max: "65535" - unit: *unitPort - roles: - - name: "broker" - required: true - - name: "coordinator" - required: true - - name: "historical" - required: true - - name: "middlemanager" - required: true - - name: "router" - required: true - asOfVersion: "0.0.0" - - - property: &metadataStorageUser - propertyNames: - - name: "druid.metadata.storage.connector.user" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "string" - roles: - - name: "broker" - required: false - - name: "coordinator" - required: false - - name: "historical" - required: false - - name: "middlemanager" - required: false - - name: "router" - required: false - asOfVersion: "0.0.0" - - - property: &metadataStoragePassword - propertyNames: - - name: "druid.metadata.storage.connector.password" - kind: - type: "file" - file: "runtime.properties" - datatype: - type: "string" - roles: - - name: "broker" - required: false - - name: "coordinator" - required: false - - name: "historical" - required: false - - name: "middlemanager" - required: false - - name: "router" - required: false - asOfVersion: "0.0.0" - - 
property: &indexerLogsDirectory propertyNames: - name: "druid.indexer.logs.directory" @@ -472,7 +341,10 @@ properties: type: "string" defaultValues: - fromVersion: "0.0.0" - value: "-server -Xms256m -Xmx256m -XX:MaxDirectMemorySize=300m -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager" + value: "-server -Xms256m -Xmx256m -XX:MaxDirectMemorySize=300m + -Duser.timezone=UTC -Dfile.encoding=UTF-8 + -XX:+ExitOnOutOfMemoryError + -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager" roles: - name: "broker" required: false diff --git a/rust/operator-binary/test/resources/druid_controller/segment_cache.yaml b/rust/operator-binary/test/resources/druid_controller/segment_cache.yaml index dde8c761..ddb2811b 100644 --- a/rust/operator-binary/test/resources/druid_controller/segment_cache.yaml +++ b/rust/operator-binary/test/resources/druid_controller/segment_cache.yaml @@ -21,12 +21,11 @@ spec: accessStyle: Path credentials: secretClass: druid-s3-credentials - metadataStorageDatabase: - dbType: postgresql - connString: jdbc:postgresql://druid-postgresql/druid - host: druid-postgresql - port: 5432 - credentialsSecret: mySecret + metadataDatabase: + postgresql: + host: druid-postgresql + database: druid + credentialsSecretName: mySecret zookeeperConfigMapName: psql-druid-znode brokers: roleGroups: diff --git a/rust/operator-binary/test/resources/druid_controller/simple.yaml b/rust/operator-binary/test/resources/druid_controller/simple.yaml index 672bb2a2..bd4983ac 100644 --- a/rust/operator-binary/test/resources/druid_controller/simple.yaml +++ b/rust/operator-binary/test/resources/druid_controller/simple.yaml @@ -13,12 +13,11 @@ spec: hdfs: configMapName: simple-hdfs directory: /druid - metadataStorageDatabase: - dbType: postgresql - connString: jdbc:postgresql://druid-postgresql/druid - host: druid-postgresql - port: 5432 - credentialsSecret: mySecret + metadataDatabase: + 
postgresql: + host: druid-postgresql + database: druid + credentialsSecretName: mySecret zookeeperConfigMapName: psql-druid-znode additionalExtensions: - druid-avro-extensions diff --git a/tests/interu.yaml b/tests/interu.yaml index f1835759..56bafbdd 100644 --- a/tests/interu.yaml +++ b/tests/interu.yaml @@ -1,3 +1,4 @@ +--- runners: amd64: platform: aks-1.32 diff --git a/tests/templates/kuttl/authorizer/04-install-druid.yaml.j2 b/tests/templates/kuttl/authorizer/04-install-druid.yaml.j2 index 3ca3911a..764bd2c5 100644 --- a/tests/templates/kuttl/authorizer/04-install-druid.yaml.j2 +++ b/tests/templates/kuttl/authorizer/04-install-druid.yaml.j2 @@ -27,11 +27,8 @@ spec: hdfs: configMapName: druid-hdfs directory: /druid - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/cluster-operation/30-install-druid.yaml.j2 b/tests/templates/kuttl/cluster-operation/30-install-druid.yaml.j2 index 96fe77cf..78d0e554 100644 --- a/tests/templates/kuttl/cluster-operation/30-install-druid.yaml.j2 +++ b/tests/templates/kuttl/cluster-operation/30-install-druid.yaml.j2 @@ -21,11 +21,8 @@ spec: hdfs: configMapName: druid-hdfs directory: /druid - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/cluster-operation/40-stop-druid.yaml.j2 b/tests/templates/kuttl/cluster-operation/40-stop-druid.yaml.j2 index 92a03b8c..42beb115 100644 --- a/tests/templates/kuttl/cluster-operation/40-stop-druid.yaml.j2 +++ 
b/tests/templates/kuttl/cluster-operation/40-stop-druid.yaml.j2 @@ -20,11 +20,8 @@ spec: hdfs: configMapName: druid-hdfs directory: /druid - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/cluster-operation/50-pause-druid.yaml.j2 b/tests/templates/kuttl/cluster-operation/50-pause-druid.yaml.j2 index 420e199c..84b1ead8 100644 --- a/tests/templates/kuttl/cluster-operation/50-pause-druid.yaml.j2 +++ b/tests/templates/kuttl/cluster-operation/50-pause-druid.yaml.j2 @@ -20,11 +20,8 @@ spec: hdfs: configMapName: druid-hdfs directory: /druid - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/cluster-operation/60-restart-druid.yaml.j2 b/tests/templates/kuttl/cluster-operation/60-restart-druid.yaml.j2 index a40b727e..5053c55f 100644 --- a/tests/templates/kuttl/cluster-operation/60-restart-druid.yaml.j2 +++ b/tests/templates/kuttl/cluster-operation/60-restart-druid.yaml.j2 @@ -20,11 +20,8 @@ spec: hdfs: configMapName: druid-hdfs directory: /druid - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/external-access/50_druid.yaml.j2 b/tests/templates/kuttl/external-access/50_druid.yaml.j2 index 904b6001..fed375fe 100644 --- 
a/tests/templates/kuttl/external-access/50_druid.yaml.j2 +++ b/tests/templates/kuttl/external-access/50_druid.yaml.j2 @@ -13,12 +13,11 @@ spec: {% endif %} pullPolicy: IfNotPresent clusterConfig: - metadataStorageDatabase: - dbType: postgresql - connString: jdbc:postgresql://druid-postgresql/druid - host: druid-postgresql - port: 5432 - credentialsSecret: druid-credentials + metadataDatabase: + postgresql: + host: druid-postgresql + database: druid + credentialsSecretName: druid-credentials deepStorage: hdfs: configMapName: druid-hdfs diff --git a/tests/templates/kuttl/hdfs-deep-storage/03-install-druid.yaml.j2 b/tests/templates/kuttl/hdfs-deep-storage/03-install-druid.yaml.j2 index 17842f7b..dcca5636 100644 --- a/tests/templates/kuttl/hdfs-deep-storage/03-install-druid.yaml.j2 +++ b/tests/templates/kuttl/hdfs-deep-storage/03-install-druid.yaml.j2 @@ -23,11 +23,8 @@ spec: hdfs: configMapName: druid-hdfs directory: /druid - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/ingestion-no-s3-ext/03-install-druid.yaml.j2 b/tests/templates/kuttl/ingestion-no-s3-ext/03-install-druid.yaml.j2 index 32913f02..c71310e7 100644 --- a/tests/templates/kuttl/ingestion-no-s3-ext/03-install-druid.yaml.j2 +++ b/tests/templates/kuttl/ingestion-no-s3-ext/03-install-druid.yaml.j2 @@ -23,11 +23,8 @@ spec: hdfs: configMapName: druid-hdfs directory: /druid - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git 
a/tests/templates/kuttl/ingestion-s3-ext/03-install-druid.yaml.j2 b/tests/templates/kuttl/ingestion-s3-ext/03-install-druid.yaml.j2 index 048c34f1..e12bed4e 100644 --- a/tests/templates/kuttl/ingestion-s3-ext/03-install-druid.yaml.j2 +++ b/tests/templates/kuttl/ingestion-s3-ext/03-install-druid.yaml.j2 @@ -29,11 +29,8 @@ spec: host: s3-de-central.profitbricks.com credentials: secretClass: s3-credentials-class - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/ldap/13-install-druid.yaml.j2 b/tests/templates/kuttl/ldap/13-install-druid.yaml.j2 index f172afd4..3d0532ec 100644 --- a/tests/templates/kuttl/ldap/13-install-druid.yaml.j2 +++ b/tests/templates/kuttl/ldap/13-install-druid.yaml.j2 @@ -27,11 +27,8 @@ commands: hdfs: configMapName: druid-hdfs directory: /druid - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} zookeeperConfigMapName: druid-znode {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery diff --git a/tests/templates/kuttl/logging/05-install-druid.yaml.j2 b/tests/templates/kuttl/logging/05-install-druid.yaml.j2 index 859c1e98..38a2fc1f 100644 --- a/tests/templates/kuttl/logging/05-install-druid.yaml.j2 +++ b/tests/templates/kuttl/logging/05-install-druid.yaml.j2 @@ -40,11 +40,8 @@ spec: hdfs: configMapName: druid-hdfs directory: /druid - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} vectorAggregatorConfigMapName: druid-vector-aggregator-discovery 
zookeeperConfigMapName: druid-znode brokers: diff --git a/tests/templates/kuttl/oidc/40_druid.yaml.j2 b/tests/templates/kuttl/oidc/40_druid.yaml.j2 index f7ebabe5..2b85a0ee 100644 --- a/tests/templates/kuttl/oidc/40_druid.yaml.j2 +++ b/tests/templates/kuttl/oidc/40_druid.yaml.j2 @@ -58,11 +58,8 @@ spec: secretClass: minio-tls-certificates {% endif %} baseKey: deep-storage - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/orphaned-resources/03-install-druid.yaml.j2 b/tests/templates/kuttl/orphaned-resources/03-install-druid.yaml.j2 index 32913f02..c71310e7 100644 --- a/tests/templates/kuttl/orphaned-resources/03-install-druid.yaml.j2 +++ b/tests/templates/kuttl/orphaned-resources/03-install-druid.yaml.j2 @@ -23,11 +23,8 @@ spec: hdfs: configMapName: druid-hdfs directory: /druid - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/orphaned-resources/04-update-druid.yaml.j2 b/tests/templates/kuttl/orphaned-resources/04-update-druid.yaml.j2 index f7753cd8..39a7aea0 100644 --- a/tests/templates/kuttl/orphaned-resources/04-update-druid.yaml.j2 +++ b/tests/templates/kuttl/orphaned-resources/04-update-druid.yaml.j2 @@ -23,11 +23,8 @@ spec: hdfs: configMapName: druid-hdfs directory: /druid - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} {% if lookup('env', 'VECTOR_AGGREGATOR') %} 
vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/overrides/20-install-druid.yaml.j2 b/tests/templates/kuttl/overrides/20-install-druid.yaml.j2 index 8f6e45c3..28a76f9d 100644 --- a/tests/templates/kuttl/overrides/20-install-druid.yaml.j2 +++ b/tests/templates/kuttl/overrides/20-install-druid.yaml.j2 @@ -17,11 +17,8 @@ spec: hdfs: configMapName: hdfs directory: /data - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} tls: null zookeeperConfigMapName: druid-znode brokers: diff --git a/tests/templates/kuttl/resources/30-install-druid.yaml.j2 b/tests/templates/kuttl/resources/30-install-druid.yaml.j2 index 9a6d7778..0537a2b5 100644 --- a/tests/templates/kuttl/resources/30-install-druid.yaml.j2 +++ b/tests/templates/kuttl/resources/30-install-druid.yaml.j2 @@ -27,11 +27,8 @@ spec: host: minio-druid port: 9000 accessStyle: Path - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/s3-deep-storage/10-install-druid.yaml.j2 b/tests/templates/kuttl/s3-deep-storage/10-install-druid.yaml.j2 index e2399ddc..00cc99b4 100644 --- a/tests/templates/kuttl/s3-deep-storage/10-install-druid.yaml.j2 +++ b/tests/templates/kuttl/s3-deep-storage/10-install-druid.yaml.j2 @@ -59,11 +59,8 @@ spec: secretClass: minio-tls-certificates {% endif %} baseKey: deep-storage - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: 
vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/smoke/50-install-druid.yaml.j2 b/tests/templates/kuttl/smoke/50-install-druid.yaml.j2 index 73f2c4bd..939a8170 100644 --- a/tests/templates/kuttl/smoke/50-install-druid.yaml.j2 +++ b/tests/templates/kuttl/smoke/50-install-druid.yaml.j2 @@ -23,12 +23,11 @@ spec: hdfs: configMapName: druid-hdfs directory: /druid - metadataStorageDatabase: - dbType: postgresql - connString: jdbc:postgresql://druid-postgresql/druid - host: druid-postgresql - port: 5432 - credentialsSecret: druid-credentials + metadataDatabase: + postgresql: + host: druid-postgresql + database: druid + credentialsSecretName: druid-credentials {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/tls/04-install-druid.yaml.j2 b/tests/templates/kuttl/tls/04-install-druid.yaml.j2 index 5f0e68a7..d754c1d7 100644 --- a/tests/templates/kuttl/tls/04-install-druid.yaml.j2 +++ b/tests/templates/kuttl/tls/04-install-druid.yaml.j2 @@ -83,11 +83,8 @@ spec: caCert: secretClass: minio-tls-certificates baseKey: deep-storage - metadataStorageDatabase: - dbType: derby - connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true - host: localhost - port: 1527 + metadataDatabase: + derby: {} {% if test_scenario['values']['tls-mode'] == 'internal-and-server-tls' or test_scenario['values']['tls-mode'] == 'internal-and-server-tls-and-tls-client-auth' %} tls: serverAndInternalSecretClass: druid-tls