From 0a130e4bf151853c23a78a1a4fdae021c3019a2d Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Mon, 11 Nov 2024 14:57:11 +0100 Subject: [PATCH] feat(docs/schema): improve eks, kfdisitribution schema docs Port the improvements done for on-prem schema in #268 to the EKS and KFDDistribution schemas and banners used to render the markdown. Some minor improvements to On-premises too. fixes https://github.com/sighupio/product-management/issues/543 https://github.com/sighupio/furyctl/issues/287 --- banners/ekscluster.md | 8 +- banners/kfddistribution.md | 8 +- banners/onpremises.md | 8 +- defaults/ekscluster-kfd-v1alpha2.yaml | 2 +- docs/schemas/ekscluster-kfd-v1alpha2.md | 1017 ++++-- docs/schemas/kfddistribution-kfd-v1alpha2.md | 716 ++-- docs/schemas/onpremises-kfd-v1alpha2.md | 267 +- .../ekscluster/v1alpha2/private/schema.go | 3229 +++++++++-------- pkg/apis/ekscluster/v1alpha2/public/schema.go | 3135 ++++++++-------- .../kfddistribution/v1alpha2/public/schema.go | 2487 +++++++------ pkg/apis/onpremises/v1alpha2/public/schema.go | 48 +- schemas/private/ekscluster-kfd-v1alpha2.json | 426 ++- schemas/public/ekscluster-kfd-v1alpha2.json | 426 ++- .../public/kfddistribution-kfd-v1alpha2.json | 267 +- schemas/public/onpremises-kfd-v1alpha2.json | 42 +- .../config/ekscluster-kfd-v1alpha2.yaml.tpl | 4 +- 16 files changed, 6781 insertions(+), 5309 deletions(-) diff --git a/banners/ekscluster.md b/banners/ekscluster.md index a66d70188..873a47e0c 100644 --- a/banners/ekscluster.md +++ b/banners/ekscluster.md @@ -2,5 +2,11 @@ This document explains the full schema for the `kind: EKSCluster` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy a Kubernetes Fury Cluster deployed through AWS's Elastic Kubernetes Service. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind EKSCluster --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. diff --git a/banners/kfddistribution.md b/banners/kfddistribution.md index a44f13847..797d2678f 100644 --- a/banners/kfddistribution.md +++ b/banners/kfddistribution.md @@ -2,5 +2,11 @@ This document explains the full schema for the `kind: KFDDistribution` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules on top of an existing Kubernetes cluster. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind KFDDistribution --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. diff --git a/banners/onpremises.md b/banners/onpremises.md index a8d8983dd..7f05c77c8 100644 --- a/banners/onpremises.md +++ b/banners/onpremises.md @@ -2,5 +2,11 @@ This document explains the full schema for the `kind: OnPremises` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules and cluster on premises. 
-An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/onpremises-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind OnPremises --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. diff --git a/defaults/ekscluster-kfd-v1alpha2.yaml b/defaults/ekscluster-kfd-v1alpha2.yaml index 6c708be00..a5dabb722 100644 --- a/defaults/ekscluster-kfd-v1alpha2.yaml +++ b/defaults/ekscluster-kfd-v1alpha2.yaml @@ -47,7 +47,7 @@ data: name: "" create: true # internal field, should be either the VPC ID taken from the kubernetes - # phase or the ID of the created VPC in the Ifra phase + # phase or the ID of the created VPC in the Infra phase vpcId: "" # common configuration for nginx ingress controller nginx: diff --git a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md index 7521e3c34..eb1efd5dd 100644 --- a/docs/schemas/ekscluster-kfd-v1alpha2.md +++ b/docs/schemas/ekscluster-kfd-v1alpha2.md @@ -2,8 +2,14 @@ This document explains the full schema for the `kind: EKSCluster` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy a Kubernetes Fury Cluster deployed through AWS's Elastic Kubernetes Service. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind EKSCluster --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. ## Properties | Property | Type | Required | @@ -15,7 +21,7 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ### Description -A Fury Cluster deployed through AWS's Elastic Kubernetes Service +A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS). ## .apiVersion @@ -33,7 +39,7 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -49,6 +55,10 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service ## .metadata.name +### Description + +The name of the cluster. It will also be used as a prefix for all the other resources created. + ### Constraints **maximum length**: the maximum number of characters for this string is: `56` @@ -92,11 +102,15 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service | [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | | [tolerations](#specdistributioncommontolerations) | `array` | Optional | +### Description + +Common configuration for all the distribution modules. + ## .spec.distribution.common.nodeSelector ### Description -The node selector to use to place the pods for all the KFD modules +The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. ## .spec.distribution.common.provider @@ -110,21 +124,19 @@ The node selector to use to place the pods for all the KFD modules ### Description -The type of the provider, must be EKS if specified +The provider type. 
Don't set. FOR INTERNAL USE ONLY. ## .spec.distribution.common.registry ### Description -URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury). - -NOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too. +URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`). ## .spec.distribution.common.relativeVendorPath ### Description -The relative path to the vendor directory, does not need to be changed +The relative path to the vendor directory, does not need to be changed. ## .spec.distribution.common.tolerations @@ -139,13 +151,19 @@ The relative path to the vendor directory, does not need to be changed ### Description -The tolerations that will be added to the pods for all the KFD modules +An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example: + +```yaml +- effect: NoSchedule + key: node.kubernetes.io/role + value: infra +``` ## .spec.distribution.common.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -163,7 +181,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -210,7 +228,7 @@ The behavior of the configmap ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -418,7 +436,7 @@ The behavior of the secret ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -525,11 +543,15 @@ The type of the secret | [pomerium](#specdistributionmodulesauthpomerium) | `object` | Optional | | [provider](#specdistributionmodulesauthprovider) | `object` | Required | +### Description + +Configuration for the Auth module. + ## .spec.distribution.modules.auth.baseDomain ### Description -The base domain for the auth module +Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class. ## .spec.distribution.modules.auth.dex @@ -542,17 +564,32 @@ The base domain for the auth module | [expiry](#specdistributionmodulesauthdexexpiry) | `object` | Optional | | [overrides](#specdistributionmodulesauthdexoverrides) | `object` | Optional | +### Description + +Configuration for the Dex package. + ## .spec.distribution.modules.auth.dex.additionalStaticClients ### Description -The additional static clients for dex +Additional static clients defitions that will be added to the default clients included with the distribution in Dex's configuration. 
Example: + +```yaml +additionalStaticClients: + - id: my-custom-client + name: "A custom additional static client" + redirectURIs: + - "https://myapp.tld/redirect" + - "https://alias.tld/oidc-callback" + secret: supersecretpassword +``` +Reference: https://dexidp.io/docs/connectors/local/ ## .spec.distribution.modules.auth.dex.connectors ### Description -The connectors for dex +A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/ ## .spec.distribution.modules.auth.dex.expiry @@ -588,7 +625,7 @@ Dex signing key expiration time duration (default 6h). ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.auth.dex.overrides.tolerations @@ -603,13 +640,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.auth.dex.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -627,7 +664,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -650,13 +687,21 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesauthoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesauthoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the Auth module. + ## .spec.distribution.modules.auth.overrides.ingresses +### Description + +Override the definition of the Auth module ingresses. + ## .spec.distribution.modules.auth.overrides.nodeSelector ### Description -The node selector to use to place the pods for the auth module +Set to override the node selector used to place the pods of the Auth module. ## .spec.distribution.modules.auth.overrides.tolerations @@ -671,13 +716,13 @@ The node selector to use to place the pods for the auth module ### Description -The tolerations that will be added to the pods for the auth module +Set to override the tolerations that will be added to the pods of the Auth module. 
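+For reference, a minimal sketch of the Auth module's `overrides` block combining the `nodeSelector` and `tolerations` fields described above (the selector and toleration values are only illustrative):
+
+```yaml
+overrides:
+  nodeSelector:
+    node.kubernetes.io/role: infra
+  tolerations:
+    - effect: NoSchedule
+      key: node.kubernetes.io/role
+      value: infra
+```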
## .spec.distribution.modules.auth.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -695,7 +740,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -791,7 +836,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -805,7 +850,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -895,27 +940,36 @@ cat ec_private.pem | base64 | [password](#specdistributionmodulesauthproviderbasicauthpassword) | `string` | Required | | [username](#specdistributionmodulesauthproviderbasicauthusername) | `string` | Required | +### Description + +Configuration for the HTTP Basic Auth provider. + ## .spec.distribution.modules.auth.provider.basicAuth.password ### Description -The password for the basic auth +The password for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.basicAuth.username ### Description -The username for the basic auth +The username for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.type ### Description -The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** +The type of the Auth provider, options are: +- `none`: will disable authentication in the infrastructural ingresses. +- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them. +- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication. + +Default is `none`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------| @@ -969,7 +1023,7 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** ### Description -The node selector to use to place the pods for the load balancer controller module +The node selector to use to place the pods for the load balancer controller module. ## .spec.distribution.modules.aws.clusterAutoscaler.overrides.tolerations @@ -984,13 +1038,13 @@ The node selector to use to place the pods for the load balancer controller modu ### Description -The tolerations that will be added to the pods for the cluster autoscaler module +The tolerations that will be added to the pods for the cluster autoscaler module. 
## .spec.distribution.modules.aws.clusterAutoscaler.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1008,7 +1062,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1055,7 +1109,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the load balancer controller module +The node selector to use to place the pods for the load balancer controller module. ## .spec.distribution.modules.aws.ebsCsiDriver.overrides.tolerations @@ -1070,13 +1124,13 @@ The node selector to use to place the pods for the load balancer controller modu ### Description -The tolerations that will be added to the pods for the cluster autoscaler module +The tolerations that will be added to the pods for the cluster autoscaler module. ## .spec.distribution.modules.aws.ebsCsiDriver.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1094,7 +1148,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1128,7 +1182,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.aws.ebsSnapshotController.overrides.tolerations @@ -1143,13 +1197,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.aws.ebsSnapshotController.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1167,7 +1221,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1214,7 +1268,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the load balancer controller module +The node selector to use to place the pods for the load balancer controller module. ## .spec.distribution.modules.aws.loadBalancerController.overrides.tolerations @@ -1229,13 +1283,13 @@ The node selector to use to place the pods for the load balancer controller modu ### Description -The tolerations that will be added to the pods for the cluster autoscaler module +The tolerations that will be added to the pods for the cluster autoscaler module. 
## .spec.distribution.modules.aws.loadBalancerController.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1253,7 +1307,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1276,13 +1330,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesawsoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesawsoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.aws.overrides.ingresses ## .spec.distribution.modules.aws.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.aws.overrides.tolerations @@ -1297,13 +1355,13 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.aws.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1321,7 +1379,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1344,6 +1402,10 @@ The value of the toleration | [type](#specdistributionmodulesdrtype) | `string` | Required | | [velero](#specdistributionmodulesdrvelero) | `object` | Optional | +### Description + +Configuration for the Disaster Recovery module. + ## .spec.distribution.modules.dr.overrides ### Properties @@ -1354,13 +1416,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesdroverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesdroverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.dr.overrides.ingresses ## .spec.distribution.modules.dr.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.dr.overrides.tolerations @@ -1375,13 +1441,13 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. 
## .spec.distribution.modules.dr.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1399,7 +1465,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1416,11 +1482,13 @@ The value of the toleration ### Description -The type of the DR, must be ***none*** or ***eks*** +The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups. + +Default is `none`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------| @@ -1450,17 +1518,17 @@ The type of the DR, must be ***none*** or ***eks*** ### Description -The name of the velero bucket +The name of the bucket for Velero. ## .spec.distribution.modules.dr.velero.eks.region ### Description -The region where the velero bucket is located +The region where the bucket for Velero will be located. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-----------------| @@ -1507,7 +1575,7 @@ The region where the velero bucket is located ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.dr.velero.overrides.tolerations @@ -1522,13 +1590,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.dr.velero.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1546,7 +1614,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1627,7 +1695,7 @@ The Time To Live (TTL) of the backups created by the backup schedules (default ` ### Description -the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone +The base domain used for all the KFD ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone. 
## .spec.distribution.modules.ingress.certManager @@ -1638,6 +1706,10 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati | [clusterIssuer](#specdistributionmodulesingresscertmanagerclusterissuer) | `object` | Required | | [overrides](#specdistributionmodulesingresscertmanageroverrides) | `object` | Optional | +### Description + +Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses. + ## .spec.distribution.modules.ingress.certManager.clusterIssuer ### Properties @@ -1649,33 +1721,37 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati | [solvers](#specdistributionmodulesingresscertmanagerclusterissuersolvers) | `array` | Optional | | [type](#specdistributionmodulesingresscertmanagerclusterissuertype) | `string` | Optional | +### Description + +Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt. + ## .spec.distribution.modules.ingress.certManager.clusterIssuer.email ### Description -The email of the cluster issuer +The email address to use during the certificate issuing process. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.name ### Description -The name of the cluster issuer +Name of the clusterIssuer. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers ### Description -The custom solvers configurations +List of challenge solvers to use instead of the default one for the `http01` challenge. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.type ### Description -The type of the cluster issuer, must be ***dns01*** or ***http01*** +The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1695,7 +1771,7 @@ The type of the cluster issuer, must be ***dns01*** or ***http01*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations @@ -1710,13 +1786,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
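+As a reference for the `clusterIssuer` fields documented above, a minimal sketch that uses a custom DNS01 solver instead of the default `http01` one (the issuer name, email and DNS zone below are hypothetical):
+
+```yaml
+certManager:
+  clusterIssuer:
+    name: letsencrypt-fury
+    email: example@sighup.io
+    solvers:
+      - dns01:
+          route53:
+            region: eu-west-1
+        selector:
+          dnsZones:
+            - "fury-demo.sighup.io"
+```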
## .spec.distribution.modules.ingress.certManager.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1734,7 +1810,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1757,6 +1833,10 @@ The value of the toleration | [private](#specdistributionmodulesingressdnsprivate) | `object` | Required | | [public](#specdistributionmodulesingressdnspublic) | `object` | Required | +### Description + +DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission. + ## .spec.distribution.modules.ingress.dns.overrides ### Properties @@ -1770,7 +1850,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.dns.overrides.tolerations @@ -1785,13 +1865,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.dns.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1809,7 +1889,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1831,17 +1911,21 @@ The value of the toleration | [create](#specdistributionmodulesingressdnsprivatecreate) | `boolean` | Required | | [name](#specdistributionmodulesingressdnsprivatename) | `string` | Required | +### Description + +The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone. + ## .spec.distribution.modules.ingress.dns.private.create ### Description -If true, the private hosted zone will be created +By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead. ## .spec.distribution.modules.ingress.dns.private.name ### Description -The name of the private hosted zone +The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`. ## .spec.distribution.modules.ingress.dns.public @@ -1856,13 +1940,13 @@ The name of the private hosted zone ### Description -If true, the public hosted zone will be created +By default, a Terraform data source will be used to get the public DNS zone. Set to `true` to create the public zone instead. ## .spec.distribution.modules.ingress.dns.public.name ### Description -The name of the public hosted zone +The name of the public hosted zone. 
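+Tying the DNS settings together with the rest of the Ingress module, a minimal sketch for the `dual` nginx type where `baseDomain` matches the private DNS zone (the domain names are only examples):
+
+```yaml
+ingress:
+  baseDomain: internal.fury-demo.sighup.io
+  nginx:
+    type: dual
+  dns:
+    public:
+      name: fury-demo.sighup.io
+      create: false
+    private:
+      name: internal.fury-demo.sighup.io
+      create: true
+```
+
+With this setup the infrastructural ingresses are published only in the private zone, while application ingresses can still use the public one.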
## .spec.distribution.modules.ingress.forecastle @@ -1885,7 +1969,7 @@ The name of the public hosted zone ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations @@ -1900,13 +1984,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1924,7 +2008,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1949,7 +2033,7 @@ The value of the toleration ### Description -Configurations for the nginx ingress controller module +Configurations for the Ingress nginx controller package. ## .spec.distribution.modules.ingress.nginx.overrides @@ -1964,7 +2048,7 @@ Configurations for the nginx ingress controller module ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations @@ -1979,13 +2063,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2003,7 +2087,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2029,11 +2113,11 @@ The value of the toleration ### Description -The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret*** +The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -2051,25 +2135,42 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or ** | [cert](#specdistributionmodulesingressnginxtlssecretcert) | `string` | Required | | [key](#specdistributionmodulesingressnginxtlssecretkey) | `string` | Required | +### Description + +Kubernetes TLS secret for the ingresses TLS certificate. + ## .spec.distribution.modules.ingress.nginx.tls.secret.ca +### Description + +The Certificate Authority certificate file's content. You can use the `"{file://}"` notation to get the content from a file. 
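+For example, a sketch of the `secret` block loading all three files from disk with the `{file://}` notation (the paths are hypothetical; the `cert` and `key` fields are described in the next sections):
+
+```yaml
+tls:
+  provider: secret
+  secret:
+    ca: "{file://secrets/ca.crt}"
+    cert: "{file://secrets/tls.crt}"
+    key: "{file://secrets/tls.key}"
+```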
+ ## .spec.distribution.modules.ingress.nginx.tls.secret.cert ### Description -The certificate file content or you can use the file notation to get the content from a file +The certificate file's content. You can use the `"{file://}"` notation to get the content from a file. ## .spec.distribution.modules.ingress.nginx.tls.secret.key +### Description + +The signing key file's content. You can use the `"{file://}"` notation to get the content from a file. + ## .spec.distribution.modules.ingress.nginx.type ### Description -The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual*** +The type of the Ingress nginx controller, options are: +- `none`: no ingress controller will be installed and no infrastructural ingresses will be created. +- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created. +- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type. + +Default is `single`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2087,6 +2188,10 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** | [nodeSelector](#specdistributionmodulesingressoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesingressoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the Ingress module. + ## .spec.distribution.modules.ingress.overrides.ingresses ### Properties @@ -2109,25 +2214,25 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** ### Description -If true, the ingress will not have authentication +If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth. ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.host ### Description -The host of the ingress +Use this host for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.ingressClass ### Description -The ingress class of the ingress +Use this ingress class for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.nodeSelector ### Description -The node selector to use to place the pods for the ingress module +Set to override the node selector used to place the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations @@ -2142,13 +2247,13 @@ The node selector to use to place the pods for the ingress module ### Description -The tolerations that will be added to the pods for the ingress module +Set to override the tolerations that will be added to the pods of the Ingress module. 
## .spec.distribution.modules.ingress.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2166,7 +2271,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2194,6 +2299,10 @@ The value of the toleration | [overrides](#specdistributionmodulesloggingoverrides) | `object` | Optional | | [type](#specdistributionmodulesloggingtype) | `string` | Required | +### Description + +Configuration for the Logging module. + ## .spec.distribution.modules.logging.cerebro ### Properties @@ -2202,6 +2311,10 @@ The value of the toleration |:-------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingcerebrooverrides) | `object` | Optional | +### Description + +DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. + ## .spec.distribution.modules.logging.cerebro.overrides ### Properties @@ -2215,7 +2328,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.cerebro.overrides.tolerations @@ -2230,13 +2343,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.cerebro.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2254,7 +2367,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2284,55 +2397,55 @@ The value of the toleration ### Description -when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows. +When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows. ## .spec.distribution.modules.logging.customOutputs.audit ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.errors ### Description -This value defines where the output from Flow will be sent. 
Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.events ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.infra ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.ingressNginx ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.kubernetes ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdCommon ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdEtcd ### Description -This value defines where the output from Flow will be sent. 
Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.loki @@ -2344,11 +2457,19 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [externalEndpoint](#specdistributionmoduleslogginglokiexternalendpoint) | `object` | Optional | | [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional | +### Description + +Configuration for the Loki package. + ## .spec.distribution.modules.logging.loki.backend +### Description + +The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO. + ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2367,35 +2488,39 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [insecure](#specdistributionmoduleslogginglokiexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmoduleslogginglokiexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Loki's external storage backend. + ## .spec.distribution.modules.logging.loki.externalEndpoint.accessKeyId ### Description -The access key id of the loki external endpoint +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.externalEndpoint.bucketName ### Description -The bucket name of the loki external endpoint +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.endpoint ### Description -The endpoint of the loki external endpoint +External S3-compatible endpoint for Loki's storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.insecure ### Description -If true, the loki external endpoint will be insecure +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.logging.loki.externalEndpoint.secretAccessKey ### Description -The secret access key of the loki external endpoint +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.resources @@ -2419,13 +2544,13 @@ The secret access key of the loki external endpoint ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.loki.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.loki.resources.requests @@ -2440,13 +2565,13 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.loki.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. 
Example: `500M`. ## .spec.distribution.modules.logging.minio @@ -2458,6 +2583,10 @@ The memory request for the opensearch pods | [rootUser](#specdistributionmodulesloggingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulesloggingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Logging's MinIO deployment. + ## .spec.distribution.modules.logging.minio.overrides ### Properties @@ -2471,7 +2600,7 @@ The memory request for the opensearch pods ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations @@ -2486,13 +2615,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2510,7 +2639,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2536,19 +2665,19 @@ The value of the toleration ### Description -The password of the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.logging.minio.rootUser.username ### Description -The username of the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.logging.minio.storageSize ### Description -The PVC size for each minio disk, 6 disks total +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.logging.opensearch @@ -2574,7 +2703,7 @@ The PVC size for each minio disk, 6 disks total ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations @@ -2589,13 +2718,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2613,7 +2742,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2648,13 +2777,13 @@ The value of the toleration ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.opensearch.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. 
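+Putting the limits above together with the requests described in the next sections, a minimal sketch of a complete `resources` block (the values are only examples):
+
+```yaml
+resources:
+  requests:
+    cpu: 500m
+    memory: 500M
+  limits:
+    cpu: 1000m
+    memory: 1G
+```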
## .spec.distribution.modules.logging.opensearch.resources.requests @@ -2669,29 +2798,29 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.opensearch.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.opensearch.storageSize ### Description -The storage size for the opensearch pods +The storage size for the OpenSearch volumes. ## .spec.distribution.modules.logging.opensearch.type ### Description -The type of the opensearch, must be ***single*** or ***triple*** +The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2706,6 +2835,10 @@ The type of the opensearch, must be ***single*** or ***triple*** |:--------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingoperatoroverrides) | `object` | Optional | +### Description + +Configuration for the Logging Operator. + ## .spec.distribution.modules.logging.operator.overrides ### Properties @@ -2719,7 +2852,7 @@ The type of the opensearch, must be ***single*** or ***triple*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.operator.overrides.tolerations @@ -2734,13 +2867,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.operator.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2758,7 +2891,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2781,13 +2914,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesloggingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesloggingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.logging.overrides.ingresses ## .spec.distribution.modules.logging.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.logging.overrides.tolerations @@ -2802,13 +2939,13 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. 
## .spec.distribution.modules.logging.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2826,7 +2963,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2843,11 +2980,17 @@ The value of the toleration ### Description -selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. +Selects the logging stack. Options are: +- `none`: will disable the centralized logging. +- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. +- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage. +- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. + +Default is `opensearch`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------------| @@ -2876,7 +3019,7 @@ selects the logging stack. Choosing none will disable the centralized logging. C ### Description -configuration for the Monitoring module components +Configuration for the Monitoring module. ## .spec.distribution.modules.monitoring.alertmanager @@ -2892,19 +3035,19 @@ configuration for the Monitoring module components ### Description -The webhook url to send deadman switch monitoring, for example to use with healthchecks.io +The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io. ## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules ### Description -If true, the default rules will be installed +Set to false to avoid installing the Prometheus rules (alerts) included with the distribution. ## .spec.distribution.modules.monitoring.alertmanager.slackWebhookUrl ### Description -The slack webhook url to send alerts +The Slack webhook URL where to send the infrastructural and workload alerts to. ## .spec.distribution.modules.monitoring.blackboxExporter @@ -2927,7 +3070,7 @@ The slack webhook url to send alerts ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations @@ -2942,13 +3085,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2966,7 +3109,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3010,7 +3153,7 @@ Notice that by default anonymous access is enabled. ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations @@ -3025,13 +3168,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3049,7 +3192,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3095,7 +3238,7 @@ More details in [Grafana's documentation](https://grafana.com/docs/grafana/lates ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations @@ -3110,13 +3253,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3134,7 +3277,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3158,15 +3301,19 @@ The value of the toleration | [overrides](#specdistributionmodulesmonitoringmimiroverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulesmonitoringmimirretentiontime) | `string` | Optional | +### Description + +Configuration for the Mimir package. + ## .spec.distribution.modules.monitoring.mimir.backend ### Description -The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3185,35 +3332,39 @@ The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulesmonitoringmimirexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulesmonitoringmimirexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Mimir's external storage backend. + ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.accessKeyId ### Description -The access key id of the external mimir backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.bucketName ### Description -The bucket name of the external mimir backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.endpoint ### Description -The endpoint of the external mimir backend +External S3-compatible endpoint for Mimir's storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure ### Description -If true, the external mimir backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.secretAccessKey ### Description -The secret access key of the external mimir backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.overrides @@ -3228,7 +3379,7 @@ The secret access key of the external mimir backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.mimir.overrides.tolerations @@ -3243,13 +3394,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.mimir.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3267,7 +3418,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3284,7 +3435,7 @@ The value of the toleration ### Description -The retention time for the mimir pods +The retention time for the logs stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days. ## .spec.distribution.modules.monitoring.minio @@ -3296,6 +3447,10 @@ The retention time for the mimir pods | [rootUser](#specdistributionmodulesmonitoringminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulesmonitoringminiostoragesize) | `string` | Optional | +### Description + +Configuration for Monitoring's MinIO deployment. 
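+
+As a sketch, with placeholder values, the in-cluster MinIO deployment for the Monitoring module could be customized like this:
+
+```yaml
+spec:
+  distribution:
+    modules:
+      monitoring:
+        minio:
+          # Illustrative values: size of each of the 6 MinIO disks and root credentials.
+          storageSize: "20Gi"
+          rootUser:
+            username: monitoring-minio-admin
+            password: replace-with-a-strong-password
+```
+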
+ ## .spec.distribution.modules.monitoring.minio.overrides ### Properties @@ -3309,7 +3464,7 @@ The retention time for the mimir pods ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.minio.overrides.tolerations @@ -3324,13 +3479,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.minio.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3348,7 +3503,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3374,19 +3529,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.monitoring.overrides @@ -3398,13 +3553,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulesmonitoringoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesmonitoringoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.monitoring.overrides.ingresses ## .spec.distribution.modules.monitoring.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations @@ -3419,13 +3578,13 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3443,7 +3602,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3498,13 +3657,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. 
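+
+The `resources` object follows the usual Kubernetes requests and limits structure. A sketch using the example values mentioned in this schema:
+
+```yaml
+spec:
+  distribution:
+    modules:
+      monitoring:
+        prometheus:
+          resources:
+            requests:
+              cpu: "500m"
+              memory: "500M"
+            limits:
+              cpu: "1000m"
+              memory: "1G"
+```
+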
## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests @@ -3519,31 +3678,31 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.prometheus.retentionSize ### Description -The retention size for the k8s Prometheus instance. +The retention size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.retentionTime ### Description -The retention time for the k8s Prometheus instance. +The retention time for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.storageSize ### Description -The storage size for the k8s Prometheus instance. +The storage size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheusAgent @@ -3584,13 +3743,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests @@ -3605,28 +3764,30 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.type ### Description -The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***. +The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`. - `none`: will disable the whole monitoring stack. -- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. +- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. -- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. 
+- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. + +Default is `prometheus`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------| @@ -3656,7 +3817,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations @@ -3671,13 +3832,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3695,7 +3856,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3717,20 +3878,31 @@ The value of the toleration | [overrides](#specdistributionmodulesnetworkingoverrides) | `object` | Optional | | [tigeraOperator](#specdistributionmodulesnetworkingtigeraoperator) | `object` | Optional | +### Description + +Configuration for the Networking module. + ## .spec.distribution.modules.networking.overrides ### Properties | Property | Type | Required | |:------------------------------------------------------------------------|:---------|:---------| +| [ingresses](#specdistributionmodulesnetworkingoverridesingresses) | `object` | Optional | | [nodeSelector](#specdistributionmodulesnetworkingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesnetworkingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + +## .spec.distribution.modules.networking.overrides.ingresses + ## .spec.distribution.modules.networking.overrides.nodeSelector ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.networking.overrides.tolerations @@ -3745,13 +3917,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the module. 
## .spec.distribution.modules.networking.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3769,7 +3941,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3803,7 +3975,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations @@ -3818,13 +3990,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3842,7 +4014,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3866,6 +4038,10 @@ The value of the toleration | [overrides](#specdistributionmodulespolicyoverrides) | `object` | Optional | | [type](#specdistributionmodulespolicytype) | `string` | Required | +### Description + +Configuration for the Policy module. + ## .spec.distribution.modules.policy.gatekeeper ### Properties @@ -3877,6 +4053,10 @@ The value of the toleration | [installDefaultPolicies](#specdistributionmodulespolicygatekeeperinstalldefaultpolicies) | `boolean` | Required | | [overrides](#specdistributionmodulespolicygatekeeperoverrides) | `object` | Optional | +### Description + +Configuration for the Gatekeeper package. + ## .spec.distribution.modules.policy.gatekeeper.additionalExcludedNamespaces ### Description @@ -3887,11 +4067,11 @@ This parameter adds namespaces to Gatekeeper's exemption list, so it will not en ### Description -The enforcement action to use for the gatekeeper module +The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3903,7 +4083,7 @@ The enforcement action to use for the gatekeeper module ### Description -If true, the default policies will be installed +Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution. 
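+
+For illustration purposes only, a Policy module using Gatekeeper (the `type` field is documented below) could be configured with a sketch like the following, where the excluded namespace is a placeholder:
+
+```yaml
+spec:
+  distribution:
+    modules:
+      policy:
+        type: gatekeeper
+        gatekeeper:
+          enforcementAction: deny
+          installDefaultPolicies: true
+          additionalExcludedNamespaces:
+            - my-legacy-namespace # illustrative
+```
+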
## .spec.distribution.modules.policy.gatekeeper.overrides @@ -3918,7 +4098,7 @@ If true, the default policies will be installed ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations @@ -3933,13 +4113,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3957,7 +4137,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3981,17 +4161,21 @@ The value of the toleration | [overrides](#specdistributionmodulespolicykyvernooverrides) | `object` | Optional | | [validationFailureAction](#specdistributionmodulespolicykyvernovalidationfailureaction) | `string` | Required | +### Description + +Configuration for the Kyverno package. + ## .spec.distribution.modules.policy.kyverno.additionalExcludedNamespaces ### Description -This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them. +This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them. ## .spec.distribution.modules.policy.kyverno.installDefaultPolicies ### Description -If true, the default policies will be installed +Set to `false` to avoid installing the default Kyverno policies included with distribution. ## .spec.distribution.modules.policy.kyverno.overrides @@ -4006,7 +4190,7 @@ If true, the default policies will be installed ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.policy.kyverno.overrides.tolerations @@ -4021,13 +4205,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.policy.kyverno.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4045,7 +4229,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4062,11 +4246,11 @@ The value of the toleration ### Description -The validation failure action to use for the kyverno module +The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -4083,13 +4267,17 @@ The validation failure action to use for the kyverno module | [nodeSelector](#specdistributionmodulespolicyoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulespolicyoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.policy.overrides.ingresses ## .spec.distribution.modules.policy.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations @@ -4104,13 +4292,13 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4128,7 +4316,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4145,11 +4333,13 @@ The value of the toleration ### Description -The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno*** +The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. + +Default is `none`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -4168,6 +4358,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [tempo](#specdistributionmodulestracingtempo) | `object` | Optional | | [type](#specdistributionmodulestracingtype) | `string` | Required | +### Description + +Configuration for the Tracing module. + ## .spec.distribution.modules.tracing.minio ### Properties @@ -4178,6 +4372,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [rootUser](#specdistributionmodulestracingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulestracingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Tracing's MinIO deployment. + ## .spec.distribution.modules.tracing.minio.overrides ### Properties @@ -4191,7 +4389,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.tracing.minio.overrides.tolerations @@ -4206,13 +4404,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4230,7 +4428,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4256,19 +4454,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.tracing.overrides @@ -4280,13 +4478,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulestracingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulestracingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.tracing.overrides.ingresses ## .spec.distribution.modules.tracing.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations @@ -4301,13 +4503,13 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4325,7 +4527,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4349,15 +4551,19 @@ The value of the toleration | [overrides](#specdistributionmodulestracingtempooverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulestracingtemporetentiontime) | `string` | Optional | +### Description + +Configuration for the Tempo package. + ## .spec.distribution.modules.tracing.tempo.backend ### Description -The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Tempo. 
`minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4376,35 +4582,39 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulestracingtempoexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulestracingtempoexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Tempo's external storage backend. + ## .spec.distribution.modules.tracing.tempo.externalEndpoint.accessKeyId ### Description -The access key id of the external tempo backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.bucketName ### Description -The bucket name of the external tempo backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.endpoint ### Description -The endpoint of the external tempo backend +External S3-compatible endpoint for Tempo's storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure ### Description -If true, the external tempo backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.secretAccessKey ### Description -The secret access key of the external tempo backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.overrides @@ -4419,7 +4629,7 @@ The secret access key of the external tempo backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.tracing.tempo.overrides.tolerations @@ -4434,13 +4644,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.tempo.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4458,7 +4668,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4475,17 +4685,19 @@ The value of the toleration ### Description -The retention time for the tempo pods +The retention time for the traces stored in Tempo. ## .spec.distribution.modules.tracing.type ### Description -The type of tracing to use, either ***none*** or ***tempo*** +The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment. + +Default is `tempo`. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------| @@ -4494,6 +4706,10 @@ The type of tracing to use, either ***none*** or ***tempo*** ## .spec.distributionVersion +### Description + +Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`. + ### Constraints **minimum length**: the minimum number of characters for this string is: `1` @@ -4517,7 +4733,7 @@ The type of tracing to use, either ***none*** or ***tempo*** ### Description -This key defines the VPC that will be created in AWS +Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead. ## .spec.infrastructure.vpc.network @@ -4532,7 +4748,7 @@ This key defines the VPC that will be created in AWS ### Description -This is the CIDR of the VPC that will be created +The network CIDR for the VPC that will be created ### Constraints @@ -4553,11 +4769,15 @@ This is the CIDR of the VPC that will be created | [private](#specinfrastructurevpcnetworksubnetscidrsprivate) | `array` | Required | | [public](#specinfrastructurevpcnetworksubnetscidrspublic) | `array` | Required | +### Description + +Network CIDRS configuration for private and public subnets. + ## .spec.infrastructure.vpc.network.subnetsCidrs.private ### Description -These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created +Network CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created ### Constraints @@ -4573,7 +4793,7 @@ These are the CIRDs for the private subnets, where the nodes, the pods, and the ### Description -These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created +Network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created ### Constraints @@ -4605,31 +4825,31 @@ These are the CIDRs for the public subnets, where the public load balancers and ### Description -This section defines the creation of VPN bastions +Configuration for the VPN server instances. ## .spec.infrastructure.vpn.bucketNamePrefix ### Description -This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states +This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users). ## .spec.infrastructure.vpn.dhParamsBits ### Description -The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file +The `dhParamsBits` size used for the creation of the .pem file that will be used in the dh openvpn server.conf file. ## .spec.infrastructure.vpn.diskSize ### Description -The size of the disk in GB +The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB. ## .spec.infrastructure.vpn.iamUserNameOverride ### Description -Overrides the default IAM user name for the VPN +Overrides IAM user name for the VPN. Default is to use the cluster name. 
### Constraints @@ -4645,25 +4865,25 @@ Overrides the default IAM user name for the VPN ### Description -The size of the AWS EC2 instance +The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3-micro`. ## .spec.infrastructure.vpn.instances ### Description -The number of instances to create, 0 to skip the creation +The number of VPN server instances to create, `0` to skip the creation. ## .spec.infrastructure.vpn.operatorName ### Description -The username of the account to create in the bastion's operating system +The username of the account to create in the bastion's operating system. ## .spec.infrastructure.vpn.port ### Description -The port used by the OpenVPN server +The port where each OpenVPN server will listen for connections. ## .spec.infrastructure.vpn.ssh @@ -4679,7 +4899,7 @@ The port used by the OpenVPN server ### Description -The CIDR enabled in the security group that can access the bastions in SSH +The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source. ### Constraints @@ -4695,7 +4915,7 @@ The CIDR enabled in the security group that can access the bastions in SSH ### Description -The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user +List of GitHub usernames from whom get their SSH public key and add as authorized keys of the `operatorName` user. ### Constraints @@ -4705,13 +4925,13 @@ The github user name list that will be used to get the ssh public key that will ### Description -This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented +**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system. ## .spec.infrastructure.vpn.vpcId ### Description -The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted +The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted. ### Constraints @@ -4727,7 +4947,7 @@ The VPC ID where the VPN servers will be created, required only if .spec.infrast ### Description -The CIDR that will be used to assign IP addresses to the VPN clients when connected +The network CIDR that will be used to assign IP addresses to the VPN clients when connected. ### Constraints @@ -4758,6 +4978,10 @@ The CIDR that will be used to assign IP addresses to the VPN clients when connec | [vpcId](#speckubernetesvpcid) | `string` | Optional | | [workersIAMRoleNamePrefixOverride](#speckubernetesworkersiamrolenameprefixoverride) | `string` | Optional | +### Description + +Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl. + ## .spec.kubernetes.apiServer ### Properties @@ -4773,13 +4997,13 @@ The CIDR that will be used to assign IP addresses to the VPN clients when connec ### Description -This value defines if the API server will be accessible only from the private subnets +This value defines if the Kubernetes API server will be accessible from the private subnets. Default it `true`. ## .spec.kubernetes.apiServer.privateAccessCidrs ### Description -This value defines the CIDRs that will be allowed to access the API server from the private subnets +The network CIDRs from the private subnets that will be allowed access the Kubernetes API server. 
### Constraints @@ -4795,13 +5019,13 @@ This value defines the CIDRs that will be allowed to access the API server from ### Description -This value defines if the API server will be accessible from the public subnets +This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`. ## .spec.kubernetes.apiServer.publicAccessCidrs ### Description -This value defines the CIDRs that will be allowed to access the API server from the public subnets +The network CIDRs from the public subnets that will be allowed access the Kubernetes API server. ### Constraints @@ -4823,11 +5047,17 @@ This value defines the CIDRs that will be allowed to access the API server from | [roles](#speckubernetesawsauthroles) | `array` | Optional | | [users](#speckubernetesawsauthusers) | `array` | Optional | +### Description + +Optional additional security configuration for EKS IAM via the `aws-auth` configmap. + +Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html + ## .spec.kubernetes.awsAuth.additionalAccounts ### Description -This optional array defines additional AWS accounts that will be added to the aws-auth configmap +This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap. ## .spec.kubernetes.awsAuth.roles @@ -4841,7 +5071,7 @@ This optional array defines additional AWS accounts that will be added to the aw ### Description -This optional array defines additional IAM roles that will be added to the aws-auth configmap +This optional array defines additional IAM roles that will be added to the `aws-auth` configmap. ## .spec.kubernetes.awsAuth.roles.groups @@ -4871,7 +5101,7 @@ This optional array defines additional IAM roles that will be added to the aws-a ### Description -This optional array defines additional IAM users that will be added to the aws-auth configmap +This optional array defines additional IAM users that will be added to the `aws-auth` configmap. ## .spec.kubernetes.awsAuth.users.groups @@ -4893,7 +5123,7 @@ This optional array defines additional IAM users that will be added to the aws-a ### Description -Overrides the default IAM role name prefix for the EKS cluster +Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name. ### Constraints @@ -4909,7 +5139,37 @@ Overrides the default IAM role name prefix for the EKS cluster ### Description -Optional Kubernetes Cluster log retention in days. Defaults to 90 days. +Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. Default is `90` days. + +### Constraints + +**enum**: the value of this property must be equal to one of the following integer values: + +| Value | +|:----| +|0 | +|1 | +|3 | +|5 | +|7 | +|14 | +|30 | +|60 | +|90 | +|120 | +|150 | +|180 | +|365 | +|400 | +|545 | +|731 | +|1096| +|1827| +|2192| +|2557| +|2922| +|3288| +|3653| ## .spec.kubernetes.logsTypes @@ -4919,7 +5179,7 @@ Optional list of Kubernetes Cluster log types to enable. Defaults to all types. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------------| @@ -4933,7 +5193,7 @@ Optional list of Kubernetes Cluster log types to enable. Defaults to all types. 
### Description -This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user +The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_ras.pub` file. ## .spec.kubernetes.nodePools @@ -4954,6 +5214,10 @@ This key contains the ssh public key that can connect to the nodes via SSH using | [taints](#speckubernetesnodepoolstaints) | `array` | Optional | | [type](#speckubernetesnodepoolstype) | `string` | Optional | +### Description + +Array with all the node pool definitions that will join the cluster. Each item is an object. + ## .spec.kubernetes.nodePools.additionalFirewallRules ### Properties @@ -4964,6 +5228,10 @@ This key contains the ssh public key that can connect to the nodes via SSH using | [self](#speckubernetesnodepoolsadditionalfirewallrulesself) | `array` | Optional | | [sourceSecurityGroupId](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupid) | `array` | Optional | +### Description + +Optional additional firewall rules that will be attached to the nodes. + ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks ### Properties @@ -4979,10 +5247,12 @@ This key contains the ssh public key that can connect to the nodes via SSH using ### Description -The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored. +The CIDR blocks objects definition for the Firewall rule. Even though it is a list, only one item is currently supported. See https://github.com/sighupio/fury-eks-installer/issues/46 for more details. ### Constraints +**maximum number of items**: the maximum number of items for this array is: `1` + **minimum number of items**: the minimum number of items for this array is: `1` ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.cidrBlocks @@ -5010,6 +5280,10 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b | [from](#speckubernetesnodepoolsadditionalfirewallrulescidrblocksportsfrom) | `integer` | Required | | [to](#speckubernetesnodepoolsadditionalfirewallrulescidrblocksportsto) | `integer` | Required | +### Description + +Port range for the Firewall Rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.ports.from ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.ports.to @@ -5028,11 +5302,19 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.tags +### Description + +Additional AWS tags for the Firewall rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.type +### Description + +The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic. + ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -5060,7 +5342,7 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b ### Description -The name of the FW rule +The name of the Firewall rule. 
## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports @@ -5071,6 +5353,10 @@ The name of the FW rule | [from](#speckubernetesnodepoolsadditionalfirewallrulesselfportsfrom) | `integer` | Required | | [to](#speckubernetesnodepoolsadditionalfirewallrulesselfportsto) | `integer` | Required | +### Description + +Port range for the Firewall Rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports.from ## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports.to @@ -5079,7 +5365,7 @@ The name of the FW rule ### Description -The protocol of the FW rule +The protocol of the Firewall rule. ### Constraints @@ -5095,23 +5381,23 @@ The protocol of the FW rule ### Description -If true, the source will be the security group itself +If `true`, the source will be the security group itself. ## .spec.kubernetes.nodePools.additionalFirewallRules.self.tags ### Description -The tags of the FW rule +Additional AWS tags for the Firewall rule. ## .spec.kubernetes.nodePools.additionalFirewallRules.self.type ### Description -The type of the FW rule can be ingress or egress +The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -5139,7 +5425,7 @@ The type of the FW rule can be ingress or egress ### Description -The name of the FW rule +The name for the additional Firewall rule Security Group. ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports @@ -5150,6 +5436,10 @@ The name of the FW rule | [from](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupidportsfrom) | `integer` | Required | | [to](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupidportsto) | `integer` | Required | +### Description + +Port range for the Firewall Rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports.from ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports.to @@ -5158,7 +5448,7 @@ The name of the FW rule ### Description -The protocol of the FW rule +The protocol of the Firewall rule. ### Constraints @@ -5174,23 +5464,23 @@ The protocol of the FW rule ### Description -The source security group ID +The source security group ID. ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.tags ### Description -The tags of the FW rule +Additional AWS tags for the Firewall rule. ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.type ### Description -The type of the FW rule can be ingress or egress +The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -5210,19 +5500,19 @@ The type of the FW rule can be ingress or egress ### Description -The AMI ID to use for the nodes +Optional. Custom AMI ID to use for the nodes. ## .spec.kubernetes.nodePools.ami.owner ### Description -The owner of the AMI +Optional. The owner of the custom AMI. 
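+
+To illustrate the `additionalFirewallRules` option described above, a sketch of a single rule of the `cidrBlocks` kind (remember that only one item is currently supported in the list), with placeholder name, CIDR and ports, could be:
+
+```yaml
+# Fragment that goes inside one of the node pool definitions of .spec.kubernetes.nodePools
+additionalFirewallRules:
+  cidrBlocks:
+    - name: allow-node-exporter # illustrative
+      type: ingress
+      cidrBlocks:
+        - 10.0.0.0/16 # illustrative source CIDR
+      protocol: TCP # illustrative, adjust to the allowed protocol values
+      ports:
+        from: 9100
+        to: 9100
+```
+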
## .spec.kubernetes.nodePools.attachedTargetGroups ### Description -This optional array defines additional target groups to attach to the instances in the node pool +This optional array defines additional target groups to attach to the instances in the node pool. ### Constraints @@ -5238,11 +5528,11 @@ This optional array defines additional target groups to attach to the instances ### Description -The container runtime to use for the nodes +The container runtime to use in the nodes of the node pool. Default is `containerd`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -5261,31 +5551,45 @@ The container runtime to use for the nodes | [volumeSize](#speckubernetesnodepoolsinstancevolumesize) | `integer` | Optional | | [volumeType](#speckubernetesnodepoolsinstancevolumetype) | `string` | Optional | +### Description + +Configuration for the instances that will be used in the node pool. + ## .spec.kubernetes.nodePools.instance.maxPods +### Description + +Set the maximum pods per node to a custom value. If not set will use EKS default value that depends on the instance type. + +Ref: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt + ## .spec.kubernetes.nodePools.instance.spot ### Description -If true, the nodes will be created as spot instances +If `true`, the nodes will be created as spot instances. Default is `false`. ## .spec.kubernetes.nodePools.instance.type ### Description -The instance type to use for the nodes +The instance type to use for the nodes. ## .spec.kubernetes.nodePools.instance.volumeSize ### Description -The size of the disk in GB +The size of the disk in GB. ## .spec.kubernetes.nodePools.instance.volumeType +### Description + +Volume type for the instance disk. Default is `gp2`. + ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-----------| @@ -5298,13 +5602,13 @@ The size of the disk in GB ### Description -Kubernetes labels that will be added to the nodes +Kubernetes labels that will be added to the nodes. ## .spec.kubernetes.nodePools.name ### Description -The name of the node pool +The name of the node pool. ## .spec.kubernetes.nodePools.size @@ -5319,19 +5623,19 @@ The name of the node pool ### Description -The maximum number of nodes in the node pool +The maximum number of nodes in the node pool. ## .spec.kubernetes.nodePools.size.min ### Description -The minimum number of nodes in the node pool +The minimum number of nodes in the node pool. ## .spec.kubernetes.nodePools.subnetIds ### Description -This value defines the subnet IDs where the nodes will be created +Optional list of subnet IDs where to create the nodes. ### Constraints @@ -5347,7 +5651,7 @@ This value defines the subnet IDs where the nodes will be created ### Description -AWS tags that will be added to the ASG and EC2 instances +AWS tags that will be added to the ASG and EC2 instances. 
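+
+Bringing several of the node pool fields above together, a minimal sketch of one entry of the `nodePools` array (all values are illustrative; `taints` and `type` are documented below) could be:
+
+```yaml
+spec:
+  kubernetes:
+    nodePools:
+      - name: workers
+        size:
+          min: 1
+          max: 3
+        instance:
+          type: t3.large # illustrative EC2 instance type
+          volumeSize: 50
+        labels:
+          node.kubernetes.io/role: worker # illustrative label
+        tags:
+          environment: example # illustrative AWS tag
+```
+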
## .spec.kubernetes.nodePools.taints @@ -5365,7 +5669,7 @@ AWS tags that will be added to the ASG and EC2 instances ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------------| @@ -5376,11 +5680,11 @@ AWS tags that will be added to the ASG and EC2 instances ### Description -Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim. +Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------------| @@ -5392,7 +5696,7 @@ Either `launch_configurations`, `launch_templates` or `both`. For new clusters u ### Description -This value defines the CIDR that will be used to assign IP addresses to the services +This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services. ### Constraints @@ -5408,7 +5712,7 @@ This value defines the CIDR that will be used to assign IP addresses to the serv ### Description -This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted +Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the subnet where the EKS cluster will be created. ### Constraints @@ -5424,7 +5728,7 @@ This value defines the subnet IDs where the EKS cluster will be created, require ### Description -This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted +Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created. ### Constraints @@ -5440,7 +5744,7 @@ This value defines the VPC ID where the EKS cluster will be created, required on ### Description -Overrides the default IAM role name prefix for the EKS workers +Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name. 
### Constraints @@ -5474,14 +5778,15 @@ Overrides the default IAM role name prefix for the EKS workers ### Properties -| Property | Type | Required | -|:-----------------------------------------------|:---------|:---------| -| [chart](#specpluginshelmreleaseschart) | `string` | Required | -| [name](#specpluginshelmreleasesname) | `string` | Required | -| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | -| [set](#specpluginshelmreleasesset) | `array` | Optional | -| [values](#specpluginshelmreleasesvalues) | `array` | Optional | -| [version](#specpluginshelmreleasesversion) | `string` | Optional | +| Property | Type | Required | +|:---------------------------------------------------------------------------------|:----------|:---------| +| [chart](#specpluginshelmreleaseschart) | `string` | Required | +| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional | +| [name](#specpluginshelmreleasesname) | `string` | Required | +| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | +| [set](#specpluginshelmreleasesset) | `array` | Optional | +| [values](#specpluginshelmreleasesvalues) | `array` | Optional | +| [version](#specpluginshelmreleasesversion) | `string` | Optional | ## .spec.plugins.helm.releases.chart @@ -5489,6 +5794,12 @@ Overrides the default IAM role name prefix for the EKS workers The chart of the release +## .spec.plugins.helm.releases.disableValidationOnInstall + +### Description + +Disable running `helm diff` validation when installing the plugin, it will still be done when upgrading. + ## .spec.plugins.helm.releases.name ### Description @@ -5578,9 +5889,13 @@ The name of the kustomize plugin ## .spec.region +### Description + +Defines in which AWS region the cluster and all the related resources will be created. + ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-----------------| @@ -5628,6 +5943,10 @@ This map defines which will be the common tags that will be added to all the res |:----------------------------------------------|:---------|:---------| | [terraform](#spectoolsconfigurationterraform) | `object` | Required | +### Description + +Configuration for tools used by furyctl, like Terraform. + ## .spec.toolsConfiguration.terraform ### Properties @@ -5644,6 +5963,10 @@ This map defines which will be the common tags that will be added to all the res |:----------------------------------------------|:---------|:---------| | [s3](#spectoolsconfigurationterraformstates3) | `object` | Required | +### Description + +Configuration for storing the Terraform state of the cluster. + ## .spec.toolsConfiguration.terraform.state.s3 ### Properties @@ -5655,17 +5978,21 @@ This map defines which will be the common tags that will be added to all the res | [region](#spectoolsconfigurationterraformstates3region) | `string` | Required | | [skipRegionValidation](#spectoolsconfigurationterraformstates3skipregionvalidation) | `boolean` | Optional | +### Description + +Configuration for the S3 bucket used to store the Terraform state. + ## .spec.toolsConfiguration.terraform.state.s3.bucketName ### Description -This value defines which bucket will be used to store all the states +This value defines which bucket will be used to store all the states. 
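+
+For reference, a sketch of the Terraform state configuration with placeholder bucket name and key prefix could look like:
+
+```yaml
+spec:
+  toolsConfiguration:
+    terraform:
+      state:
+        s3:
+          bucketName: example-terraform-states # illustrative
+          keyPrefix: clusters/example-cluster # illustrative
+          region: eu-west-1
+```
+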
## .spec.toolsConfiguration.terraform.state.s3.keyPrefix ### Description -This value defines which folder will be used to store all the states inside the bucket +This value defines which folder will be used to store all the states inside the bucket. ### Constraints @@ -5683,11 +6010,11 @@ This value defines which folder will be used to store all the states inside the ### Description -This value defines in which region the bucket is located +This value defines in which region the bucket is located. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-----------------| @@ -5725,5 +6052,5 @@ This value defines in which region the bucket is located ### Description -This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region +This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region. diff --git a/docs/schemas/kfddistribution-kfd-v1alpha2.md b/docs/schemas/kfddistribution-kfd-v1alpha2.md index 6118a1540..a2520654f 100644 --- a/docs/schemas/kfddistribution-kfd-v1alpha2.md +++ b/docs/schemas/kfddistribution-kfd-v1alpha2.md @@ -2,8 +2,14 @@ This document explains the full schema for the `kind: KFDDistribution` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules on top of an existing Kubernetes cluster. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind KFDDistribution --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. ## Properties | Property | Type | Required | @@ -13,6 +19,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio | [metadata](#metadata) | `object` | Required | | [spec](#spec) | `object` | Required | +### Description + +KFD modules deployed on top of an existing Kubernetes cluster. + ## .apiVersion ### Constraints @@ -29,7 +39,7 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------| @@ -45,6 +55,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ## .metadata.name +### Description + +The name of the cluster. It will also be used as a prefix for all the other resources created. + ### Constraints **maximum length**: the maximum number of characters for this string is: `56` @@ -84,11 +98,15 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio | [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | | [tolerations](#specdistributioncommontolerations) | `array` | Optional | +### Description + +Common configuration for all the distribution modules. 
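+
+As a reference, a sketch of a `common` block that places the pods of all the KFD modules on infra nodes (the node label and taint below are illustrative values):
+
+```yaml
+spec:
+  distribution:
+    common:
+      nodeSelector:
+        node.kubernetes.io/role: infra
+      tolerations:
+        - effect: NoSchedule
+          key: node.kubernetes.io/role
+          value: infra
+```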
+ ## .spec.distribution.common.nodeSelector ### Description -The node selector to use to place the pods for all the KFD modules +The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. ## .spec.distribution.common.provider @@ -102,13 +120,13 @@ The node selector to use to place the pods for all the KFD modules ### Description -The type of the provider +The provider type. Don't set. FOR INTERNAL USE ONLY. ## .spec.distribution.common.registry ### Description -URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury). +URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`). NOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too. @@ -116,7 +134,7 @@ NOTE: If plugins are pulling from the default registry, the registry will be rep ### Description -The relative path to the vendor directory, does not need to be changed +The relative path to the vendor directory, does not need to be changed. ## .spec.distribution.common.tolerations @@ -131,13 +149,19 @@ The relative path to the vendor directory, does not need to be changed ### Description -The tolerations that will be added to the pods for all the KFD modules +An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example: + +```yaml +- effect: NoSchedule + key: node.kubernetes.io/role + value: infra +``` ## .spec.distribution.common.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -155,7 +179,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -202,7 +226,7 @@ The behavior of the configmap ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -410,7 +434,7 @@ The behavior of the secret ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -493,7 +517,7 @@ The type of the secret ### Description -The kubeconfig file path +The path to the kubeconfig file. ## .spec.distribution.modules @@ -522,11 +546,15 @@ The kubeconfig file path | [pomerium](#specdistributionmodulesauthpomerium) | `object` | Optional | | [provider](#specdistributionmodulesauthprovider) | `object` | Required | +### Description + +Configuration for the Auth module. + ## .spec.distribution.modules.auth.baseDomain ### Description -The base domain for the auth module +Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class. 
## .spec.distribution.modules.auth.dex

@@ -539,17 +567,32 @@ The base domain for the auth module

| [expiry](#specdistributionmodulesauthdexexpiry) | `object` | Optional |
| [overrides](#specdistributionmodulesauthdexoverrides) | `object` | Optional |

+### Description
+
+Configuration for the Dex package.
+
## .spec.distribution.modules.auth.dex.additionalStaticClients

### Description

-The additional static clients for dex
+Additional static client definitions that will be added to the default clients included with the distribution in Dex's configuration. Example:
+
+```yaml
+additionalStaticClients:
+  - id: my-custom-client
+    name: "A custom additional static client"
+    redirectURIs:
+      - "https://myapp.tld/redirect"
+      - "https://alias.tld/oidc-callback"
+    secret: supersecretpassword
+```
+Reference: https://dexidp.io/docs/connectors/local/

## .spec.distribution.modules.auth.dex.connectors

### Description

-The connectors for dex
+A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/

## .spec.distribution.modules.auth.dex.expiry

@@ -585,7 +628,7 @@ Dex signing key expiration time duration (default 6h).

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.auth.dex.overrides.tolerations

@@ -600,13 +643,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.auth.dex.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -624,7 +667,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -647,13 +690,17 @@ The value of the toleration

| [nodeSelector](#specdistributionmodulesauthoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesauthoverridestolerations) | `array` | Optional |

+### Description
+
+Override the common configuration with a particular configuration for the Auth module.
+
## .spec.distribution.modules.auth.overrides.ingresses

## .spec.distribution.modules.auth.overrides.nodeSelector

### Description

-The node selector to use to place the pods for the auth module
+Set to override the node selector used to place the pods of the Auth module.

## .spec.distribution.modules.auth.overrides.tolerations

@@ -668,13 +715,13 @@ The node selector to use to place the pods for the auth module

### Description

-The tolerations that will be added to the pods for the auth module
+Set to override the tolerations that will be added to the pods of the Auth module.
## .spec.distribution.modules.auth.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -692,7 +739,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -788,7 +835,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -802,7 +849,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -892,27 +939,36 @@ cat ec_private.pem | base64 | [password](#specdistributionmodulesauthproviderbasicauthpassword) | `string` | Required | | [username](#specdistributionmodulesauthproviderbasicauthusername) | `string` | Required | +### Description + +Configuration for the HTTP Basic Auth provider. + ## .spec.distribution.modules.auth.provider.basicAuth.password ### Description -The password for the basic auth +The password for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.basicAuth.username ### Description -The username for the basic auth +The username for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.type ### Description -The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** +The type of the Auth provider, options are: +- `none`: will disable authentication in the infrastructural ingresses. +- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them. +- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication. + +Default is `none`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------| @@ -930,6 +986,10 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** | [type](#specdistributionmodulesdrtype) | `string` | Required | | [velero](#specdistributionmodulesdrvelero) | `object` | Optional | +### Description + +Configuration for the Disaster Recovery module. + ## .spec.distribution.modules.dr.overrides ### Properties @@ -940,13 +1000,17 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** | [nodeSelector](#specdistributionmodulesdroverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesdroverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.dr.overrides.ingresses ## .spec.distribution.modules.dr.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. 
## .spec.distribution.modules.dr.overrides.tolerations @@ -961,13 +1025,13 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.dr.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -985,7 +1049,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1002,11 +1066,13 @@ The value of the toleration ### Description -The type of the DR, must be ***none*** or ***on-premises*** +The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment. + +Default is `none`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -1024,6 +1090,10 @@ The type of the DR, must be ***none*** or ***on-premises*** | [overrides](#specdistributionmodulesdrvelerooverrides) | `object` | Optional | | [schedules](#specdistributionmodulesdrveleroschedules) | `object` | Optional | +### Description + +Configuration for the Velero package. + ## .spec.distribution.modules.dr.velero.backend ### Description @@ -1032,7 +1102,7 @@ The storage backend type for Velero. `minio` will use an in-cluster MinIO deploy ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1098,7 +1168,7 @@ The secret access key (password) for the external S3-compatible bucket. ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.dr.velero.overrides.tolerations @@ -1113,13 +1183,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.dr.velero.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1137,7 +1207,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1217,7 +1287,7 @@ The Time To Live (TTL) of the backups created by the backup schedules (default ` ### Description -the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone +The base domain used for all the KFD infrastructural ingresses. If using the nginx dual type, this value should be the same as the domain associated with the `internal` ingress class. ## .spec.distribution.modules.ingress.certManager @@ -1228,6 +1298,10 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati | [clusterIssuer](#specdistributionmodulesingresscertmanagerclusterissuer) | `object` | Required | | [overrides](#specdistributionmodulesingresscertmanageroverrides) | `object` | Optional | +### Description + +Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses. + ## .spec.distribution.modules.ingress.certManager.clusterIssuer ### Properties @@ -1239,33 +1313,37 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati | [solvers](#specdistributionmodulesingresscertmanagerclusterissuersolvers) | `array` | Optional | | [type](#specdistributionmodulesingresscertmanagerclusterissuertype) | `string` | Optional | +### Description + +Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt. + ## .spec.distribution.modules.ingress.certManager.clusterIssuer.email ### Description -The email of the cluster issuer +The email address to use during the certificate issuing process. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.name ### Description -The name of the cluster issuer +Name of the clusterIssuer. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers ### Description -The custom solvers configurations +List of challenge solvers to use instead of the default one for the `http01` challenge. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.type ### Description -The type of the cluster issuer, must be ***http01*** +The type of the clusterIssuer. Only `http01` challenge is supported for KFDDistribution kind. See solvers for arbitrary configurations. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1284,7 +1362,7 @@ The type of the cluster issuer, must be ***http01*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.ingress.certManager.overrides.tolerations @@ -1299,13 +1377,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1323,7 +1401,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1357,7 +1435,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations @@ -1372,13 +1450,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1396,7 +1474,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1421,7 +1499,7 @@ The value of the toleration ### Description -Configurations for the nginx ingress controller module +Configurations for the Ingress nginx controller package. ## .spec.distribution.modules.ingress.nginx.overrides @@ -1436,7 +1514,7 @@ Configurations for the nginx ingress controller module ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations @@ -1451,13 +1529,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.ingress.nginx.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -1475,7 +1553,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -1501,11 +1579,11 @@ The value of the toleration

### Description

-The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***
+The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:--------------|
@@ -1523,25 +1601,42 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or **

| [cert](#specdistributionmodulesingressnginxtlssecretcert) | `string` | Required |
| [key](#specdistributionmodulesingressnginxtlssecretkey) | `string` | Required |

+### Description
+
+Kubernetes TLS secret for the ingresses TLS certificate.
+
## .spec.distribution.modules.ingress.nginx.tls.secret.ca

+### Description
+
+The Certificate Authority certificate file's content. You can use the `"{file://}"` notation to get the content from a file.
+
## .spec.distribution.modules.ingress.nginx.tls.secret.cert

### Description

-The certificate file content or you can use the file notation to get the content from a file
+The certificate file's content. You can use the `"{file://}"` notation to get the content from a file.

## .spec.distribution.modules.ingress.nginx.tls.secret.key

+### Description
+
+The signing key file's content. You can use the `"{file://}"` notation to get the content from a file.
+
## .spec.distribution.modules.ingress.nginx.type

### Description

-The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***
+The type of the Ingress nginx controller, options are:
+- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.
+- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.
+- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.
+
+Default is `single`.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -1559,6 +1654,10 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or **

| [nodeSelector](#specdistributionmodulesingressoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesingressoverridestolerations) | `array` | Optional |

+### Description
+
+Override the common configuration with a particular configuration for the Ingress module.
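+
+As a reference, a sketch that overrides only the Forecastle ingress and the placement of the module's pods (hostname, ingress class and node label are illustrative values):
+
+```yaml
+spec:
+  distribution:
+    modules:
+      ingress:
+        overrides:
+          ingresses:
+            forecastle:
+              host: forecastle.internal.example.tld
+              ingressClass: internal
+          nodeSelector:
+            node.kubernetes.io/role: infra
+```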
+ ## .spec.distribution.modules.ingress.overrides.ingresses ### Properties @@ -1581,25 +1680,25 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** ### Description -If true, the ingress will not have authentication +If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth. ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.host ### Description -The host of the ingress +Use this host for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.ingressClass ### Description -The ingress class of the ingress +Use this ingress class for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.nodeSelector ### Description -The node selector to use to place the pods for the ingress module +Set to override the node selector used to place the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations @@ -1614,13 +1713,13 @@ The node selector to use to place the pods for the ingress module ### Description -The tolerations that will be added to the pods for the ingress module +Set to override the tolerations that will be added to the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1638,7 +1737,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1666,6 +1765,10 @@ The value of the toleration | [overrides](#specdistributionmodulesloggingoverrides) | `object` | Optional | | [type](#specdistributionmodulesloggingtype) | `string` | Required | +### Description + +Configuration for the Logging module. + ## .spec.distribution.modules.logging.cerebro ### Properties @@ -1674,6 +1777,10 @@ The value of the toleration |:-------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingcerebrooverrides) | `object` | Optional | +### Description + +DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. + ## .spec.distribution.modules.logging.cerebro.overrides ### Properties @@ -1687,7 +1794,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.cerebro.overrides.tolerations @@ -1702,13 +1809,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.logging.cerebro.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1726,7 +1833,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1756,55 +1863,55 @@ The value of the toleration ### Description -when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows. +When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows. ## .spec.distribution.modules.logging.customOutputs.audit ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.errors ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.events ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.infra ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.ingressNginx ### Description -This value defines where the output from Flow will be sent. 
Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.kubernetes ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdCommon ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdEtcd ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.loki @@ -1816,11 +1923,19 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [externalEndpoint](#specdistributionmoduleslogginglokiexternalendpoint) | `object` | Optional | | [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional | +### Description + +Configuration for the Loki package. + ## .spec.distribution.modules.logging.loki.backend +### Description + +The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO. + ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1839,35 +1954,39 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [insecure](#specdistributionmoduleslogginglokiexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmoduleslogginglokiexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Loki's external storage backend. 
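+
+As a reference, a sketch pointing Loki to an external S3-compatible bucket instead of the in-cluster MinIO (endpoint, bucket and credentials are placeholder values):
+
+```yaml
+spec:
+  distribution:
+    modules:
+      logging:
+        loki:
+          backend: externalEndpoint
+          externalEndpoint:
+            endpoint: s3.example.tld
+            bucketName: loki-logs
+            accessKeyId: myAccessKeyId
+            secretAccessKey: mySecretAccessKey
+            insecure: false
+```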
+ ## .spec.distribution.modules.logging.loki.externalEndpoint.accessKeyId ### Description -The access key id of the loki external endpoint +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.externalEndpoint.bucketName ### Description -The bucket name of the loki external endpoint +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.endpoint ### Description -The endpoint of the loki external endpoint +External S3-compatible endpoint for Loki's storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.insecure ### Description -If true, the loki external endpoint will be insecure +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.logging.loki.externalEndpoint.secretAccessKey ### Description -The secret access key of the loki external endpoint +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.resources @@ -1891,13 +2010,13 @@ The secret access key of the loki external endpoint ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.loki.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.loki.resources.requests @@ -1912,13 +2031,13 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.loki.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.minio @@ -1930,6 +2049,10 @@ The memory request for the opensearch pods | [rootUser](#specdistributionmodulesloggingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulesloggingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Logging's MinIO deployment. + ## .spec.distribution.modules.logging.minio.overrides ### Properties @@ -1943,7 +2066,7 @@ The memory request for the opensearch pods ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations @@ -1958,13 +2081,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1982,7 +2105,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2008,19 +2131,19 @@ The value of the toleration ### Description -The password of the minio root user +The password for the default MinIO root user. 
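+
+As a reference, a sketch of the in-cluster MinIO settings for the Logging module (credentials and size below are placeholder values):
+
+```yaml
+spec:
+  distribution:
+    modules:
+      logging:
+        minio:
+          storageSize: 20Gi
+          rootUser:
+            username: logging-minio-admin
+            password: replace-with-a-strong-password
+```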
## .spec.distribution.modules.logging.minio.rootUser.username ### Description -The username of the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.logging.minio.storageSize ### Description -The PVC size for each minio disk, 6 disks total +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.logging.opensearch @@ -2046,7 +2169,7 @@ The PVC size for each minio disk, 6 disks total ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations @@ -2061,13 +2184,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2085,7 +2208,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2120,13 +2243,13 @@ The value of the toleration ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.opensearch.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.opensearch.resources.requests @@ -2141,29 +2264,29 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.opensearch.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.opensearch.storageSize ### Description -The storage size for the opensearch pods +The storage size for the OpenSearch volumes. ## .spec.distribution.modules.logging.opensearch.type ### Description -The type of the opensearch, must be ***single*** or ***triple*** +The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2178,6 +2301,10 @@ The type of the opensearch, must be ***single*** or ***triple*** |:--------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingoperatoroverrides) | `object` | Optional | +### Description + +Configuration for the Logging Operator. + ## .spec.distribution.modules.logging.operator.overrides ### Properties @@ -2191,7 +2318,7 @@ The type of the opensearch, must be ***single*** or ***triple*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.logging.operator.overrides.tolerations

@@ -2206,13 +2333,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.logging.operator.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -2230,7 +2357,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -2253,13 +2380,17 @@ The value of the toleration

| [nodeSelector](#specdistributionmodulesloggingoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesloggingoverridestolerations) | `array` | Optional |

+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.logging.overrides.ingresses

## .spec.distribution.modules.logging.overrides.nodeSelector

### Description

-The node selector to use to place the pods for the security module
+Set to override the node selector used to place the pods of the module.

## .spec.distribution.modules.logging.overrides.tolerations

@@ -2274,13 +2405,13 @@ The node selector to use to place the pods for the security module

### Description

-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.

## .spec.distribution.modules.logging.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -2298,7 +2429,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -2315,11 +2446,17 @@ The value of the toleration

### Description

-selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.
+Selects the logging stack. Options are:
+- `none`: will disable the centralized logging.
+- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.
+- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.
+- `customOutputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.
+
+Default is `opensearch`.
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------------| @@ -2348,7 +2485,7 @@ selects the logging stack. Choosing none will disable the centralized logging. C ### Description -configuration for the Monitoring module components +Configuration for the Monitoring module. ## .spec.distribution.modules.monitoring.alertmanager @@ -2364,19 +2501,19 @@ configuration for the Monitoring module components ### Description -The webhook url to send deadman switch monitoring, for example to use with healthchecks.io +The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io. ## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules ### Description -If true, the default rules will be installed +Set to false to avoid installing the Prometheus rules (alerts) included with the distribution. ## .spec.distribution.modules.monitoring.alertmanager.slackWebhookUrl ### Description -The slack webhook url to send alerts +The Slack webhook URL where to send the infrastructural and workload alerts to. ## .spec.distribution.modules.monitoring.blackboxExporter @@ -2399,7 +2536,7 @@ The slack webhook url to send alerts ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations @@ -2414,13 +2551,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2438,7 +2575,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2482,7 +2619,7 @@ Notice that by default anonymous access is enabled. ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations @@ -2497,13 +2634,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.monitoring.grafana.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2521,7 +2658,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2567,7 +2704,7 @@ More details in [Grafana's documentation](https://grafana.com/docs/grafana/lates ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations @@ -2582,13 +2719,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2606,7 +2743,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2630,15 +2767,19 @@ The value of the toleration | [overrides](#specdistributionmodulesmonitoringmimiroverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulesmonitoringmimirretentiontime) | `string` | Optional | +### Description + +Configuration for the Mimir package. + ## .spec.distribution.modules.monitoring.mimir.backend ### Description -The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2657,35 +2798,39 @@ The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulesmonitoringmimirexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulesmonitoringmimirexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Mimir's external storage backend. + ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.accessKeyId ### Description -The access key id of the external mimir backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.bucketName ### Description -The bucket name of the external mimir backend +The bucket name of the external S3-compatible object storage. 
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.endpoint ### Description -The endpoint of the external mimir backend +External S3-compatible endpoint for Mimir's storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure ### Description -If true, the external mimir backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.secretAccessKey ### Description -The secret access key of the external mimir backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.overrides @@ -2700,7 +2845,7 @@ The secret access key of the external mimir backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.mimir.overrides.tolerations @@ -2715,13 +2860,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.mimir.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2739,7 +2884,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2756,7 +2901,7 @@ The value of the toleration ### Description -The retention time for the mimir pods +The retention time for the logs stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days. ## .spec.distribution.modules.monitoring.minio @@ -2768,6 +2913,10 @@ The retention time for the mimir pods | [rootUser](#specdistributionmodulesmonitoringminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulesmonitoringminiostoragesize) | `string` | Optional | +### Description + +Configuration for Monitoring's MinIO deployment. + ## .spec.distribution.modules.monitoring.minio.overrides ### Properties @@ -2781,7 +2930,7 @@ The retention time for the mimir pods ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.minio.overrides.tolerations @@ -2796,13 +2945,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.monitoring.minio.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2820,7 +2969,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2846,19 +2995,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.monitoring.overrides @@ -2870,13 +3019,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulesmonitoringoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesmonitoringoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.monitoring.overrides.ingresses ## .spec.distribution.modules.monitoring.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations @@ -2891,13 +3044,13 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2915,7 +3068,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2970,13 +3123,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests @@ -2991,31 +3144,31 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. 
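+
+As a reference, a sketch of a complete `resources` block for the `k8s` Prometheus instance, using the example values above (tune them to your actual workload):
+
+```yaml
+spec:
+  distribution:
+    modules:
+      monitoring:
+        prometheus:
+          resources:
+            requests:
+              cpu: 500m
+              memory: 500M
+            limits:
+              cpu: 1000m
+              memory: 1G
+```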
## .spec.distribution.modules.monitoring.prometheus.retentionSize ### Description -The retention size for the k8s Prometheus instance. +The retention size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.retentionTime ### Description -The retention time for the K8s Prometheus instance. +The retention time for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.storageSize ### Description -The storage size for the k8s Prometheus instance. +The storage size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheusAgent @@ -3056,13 +3209,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests @@ -3077,28 +3230,30 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.type ### Description -The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***. +The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`. - `none`: will disable the whole monitoring stack. - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. -- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. +- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. + +Default is `prometheus`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------| @@ -3128,7 +3283,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations @@ -3143,13 +3298,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
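For the `prometheusAgent` option described above, the agent is typically paired with a remote receiver. A minimal sketch, assuming the module's `remoteWrite` option (described above as shipping the collected metrics to a remote Prometheus receiver) accepts entries in the Prometheus Operator remote-write format; the URL is a placeholder.

```yaml
spec:
  distribution:
    modules:
      monitoring:
        type: prometheusAgent
        prometheusAgent:
          remoteWrite:
            # placeholder receiver URL, replace with your central Prometheus
            - url: https://prometheus.example.internal/api/v1/write
```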
## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3167,7 +3322,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3191,6 +3346,10 @@ The value of the toleration | [tigeraOperator](#specdistributionmodulesnetworkingtigeraoperator) | `object` | Optional | | [type](#specdistributionmodulesnetworkingtype) | `string` | Required | +### Description + +Configuration for the Networking module. + ## .spec.distribution.modules.networking.cilium ### Properties @@ -3203,6 +3362,10 @@ The value of the toleration ## .spec.distribution.modules.networking.cilium.maskSize +### Description + +The mask size to use for the Pods network on each node. + ## .spec.distribution.modules.networking.cilium.overrides ### Properties @@ -3216,7 +3379,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.networking.cilium.overrides.tolerations @@ -3231,13 +3394,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.networking.cilium.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3255,7 +3418,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3270,6 +3433,10 @@ The value of the toleration ## .spec.distribution.modules.networking.cilium.podCidr +### Description + +Allows specifing a CIDR for the Pods network different from `.spec.kubernetes.podCidr`. If not set the default is to use `.spec.kubernetes.podCidr`. + ### Constraints **pattern**: the string must match the following regular expression: @@ -3290,13 +3457,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesnetworkingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesnetworkingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.networking.overrides.ingresses ## .spec.distribution.modules.networking.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. 
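Tying together the Cilium fields documented above, an illustrative fragment could look like the following. The CIDR and mask size are placeholders, the mask size is assumed to be expressed as a string, and `podCidr` is only needed when the Pods network must differ from the cluster-level CIDR.

```yaml
spec:
  distribution:
    modules:
      networking:
        type: cilium
        cilium:
          maskSize: "24"            # placeholder per-node mask size
          podCidr: 172.16.0.0/16    # placeholder Pods network CIDR
```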
## .spec.distribution.modules.networking.overrides.tolerations @@ -3311,13 +3482,13 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.networking.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3335,7 +3506,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3369,7 +3540,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations @@ -3384,13 +3555,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3408,7 +3579,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3425,11 +3596,11 @@ The value of the toleration ### Description -The type of networking to use, either ***none***, ***calico*** or ***cilium*** +The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or `cilium`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3448,6 +3619,10 @@ The type of networking to use, either ***none***, ***calico*** or ***cilium*** | [overrides](#specdistributionmodulespolicyoverrides) | `object` | Optional | | [type](#specdistributionmodulespolicytype) | `string` | Required | +### Description + +Configuration for the Policy module. + ## .spec.distribution.modules.policy.gatekeeper ### Properties @@ -3459,6 +3634,10 @@ The type of networking to use, either ***none***, ***calico*** or ***cilium*** | [installDefaultPolicies](#specdistributionmodulespolicygatekeeperinstalldefaultpolicies) | `boolean` | Required | | [overrides](#specdistributionmodulespolicygatekeeperoverrides) | `object` | Optional | +### Description + +Configuration for the Gatekeeper package. + ## .spec.distribution.modules.policy.gatekeeper.additionalExcludedNamespaces ### Description @@ -3469,11 +3648,11 @@ This parameter adds namespaces to Gatekeeper's exemption list, so it will not en ### Description -The enforcement action to use for the gatekeeper module +The default enforcement action to use for the included constraints. 
`deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3485,7 +3664,7 @@ The enforcement action to use for the gatekeeper module ### Description -If true, the default policies will be installed +Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution. ## .spec.distribution.modules.policy.gatekeeper.overrides @@ -3500,7 +3679,7 @@ If true, the default policies will be installed ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations @@ -3515,13 +3694,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3539,7 +3718,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3563,17 +3742,21 @@ The value of the toleration | [overrides](#specdistributionmodulespolicykyvernooverrides) | `object` | Optional | | [validationFailureAction](#specdistributionmodulespolicykyvernovalidationfailureaction) | `string` | Required | +### Description + +Configuration for the Kyverno package. + ## .spec.distribution.modules.policy.kyverno.additionalExcludedNamespaces ### Description -This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them. +This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them. ## .spec.distribution.modules.policy.kyverno.installDefaultPolicies ### Description -If true, the default policies will be installed +Set to `false` to avoid installing the default Kyverno policies included with distribution. ## .spec.distribution.modules.policy.kyverno.overrides @@ -3588,7 +3771,7 @@ If true, the default policies will be installed ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.policy.kyverno.overrides.tolerations @@ -3603,13 +3786,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
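A sketch of a Policy module configuration using the Gatekeeper fields documented above; the extra namespace is a placeholder, and the `kyverno` package can be configured analogously with its own block.

```yaml
spec:
  distribution:
    modules:
      policy:
        type: gatekeeper
        gatekeeper:
          enforcementAction: warn
          installDefaultPolicies: true
          additionalExcludedNamespaces:
            - legacy-apps   # placeholder namespace exempted from the constraints
```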
## .spec.distribution.modules.policy.kyverno.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3627,7 +3810,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3644,11 +3827,11 @@ The value of the toleration ### Description -The validation failure action to use for the kyverno module +The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -3665,13 +3848,17 @@ The validation failure action to use for the kyverno module | [nodeSelector](#specdistributionmodulespolicyoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulespolicyoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.policy.overrides.ingresses ## .spec.distribution.modules.policy.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations @@ -3686,13 +3873,13 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3710,7 +3897,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3727,11 +3914,13 @@ The value of the toleration ### Description -The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno*** +The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. + +Default is `none`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -3750,6 +3939,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [tempo](#specdistributionmodulestracingtempo) | `object` | Optional | | [type](#specdistributionmodulestracingtype) | `string` | Required | +### Description + +Configuration for the Tracing module. 
+ ## .spec.distribution.modules.tracing.minio ### Properties @@ -3760,6 +3953,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [rootUser](#specdistributionmodulestracingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulestracingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Tracing's MinIO deployment. + ## .spec.distribution.modules.tracing.minio.overrides ### Properties @@ -3773,7 +3970,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations @@ -3788,13 +3985,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3812,7 +4009,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3838,19 +4035,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.tracing.overrides @@ -3862,13 +4059,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulestracingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulestracingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.tracing.overrides.ingresses ## .spec.distribution.modules.tracing.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations @@ -3883,13 +4084,13 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. 
## .spec.distribution.modules.tracing.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3907,7 +4108,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3931,15 +4132,19 @@ The value of the toleration | [overrides](#specdistributionmodulestracingtempooverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulestracingtemporetentiontime) | `string` | Optional | +### Description + +Configuration for the Tempo package. + ## .spec.distribution.modules.tracing.tempo.backend ### Description -The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3958,35 +4163,39 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulestracingtempoexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulestracingtempoexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Tempo's external storage backend. + ## .spec.distribution.modules.tracing.tempo.externalEndpoint.accessKeyId ### Description -The access key id of the external tempo backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.bucketName ### Description -The bucket name of the external tempo backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.endpoint ### Description -The endpoint of the external tempo backend +External S3-compatible endpoint for Tempo's storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure ### Description -If true, the external tempo backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.secretAccessKey ### Description -The secret access key of the external tempo backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.overrides @@ -4001,7 +4210,7 @@ The secret access key of the external tempo backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.tracing.tempo.overrides.tolerations @@ -4016,13 +4225,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
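As an illustration of the Tempo backend options documented above, a hedged fragment pointing Tempo at an external S3-compatible bucket could look like this; endpoint, bucket and credentials are placeholders.

```yaml
spec:
  distribution:
    modules:
      tracing:
        type: tempo
        tempo:
          backend: externalEndpoint
          externalEndpoint:
            endpoint: s3.example.internal:9000
            bucketName: kfd-tempo
            accessKeyId: tempo-access-key
            secretAccessKey: tempo-secret-key
            insecure: false
```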
## .spec.distribution.modules.tracing.tempo.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4040,7 +4249,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4057,17 +4266,19 @@ The value of the toleration ### Description -The retention time for the tempo pods +The retention time for the traces stored in Tempo. ## .spec.distribution.modules.tracing.type ### Description -The type of tracing to use, either ***none*** or ***tempo*** +The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment. + +Default is `tempo`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------| @@ -4076,6 +4287,10 @@ The type of tracing to use, either ***none*** or ***tempo*** ## .spec.distributionVersion +### Description + +Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`. + ### Constraints **minimum length**: the minimum number of characters for this string is: `1` @@ -4102,14 +4317,15 @@ The type of tracing to use, either ***none*** or ***tempo*** ### Properties -| Property | Type | Required | -|:-----------------------------------------------|:---------|:---------| -| [chart](#specpluginshelmreleaseschart) | `string` | Required | -| [name](#specpluginshelmreleasesname) | `string` | Required | -| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | -| [set](#specpluginshelmreleasesset) | `array` | Optional | -| [values](#specpluginshelmreleasesvalues) | `array` | Optional | -| [version](#specpluginshelmreleasesversion) | `string` | Optional | +| Property | Type | Required | +|:---------------------------------------------------------------------------------|:----------|:---------| +| [chart](#specpluginshelmreleaseschart) | `string` | Required | +| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional | +| [name](#specpluginshelmreleasesname) | `string` | Required | +| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | +| [set](#specpluginshelmreleasesset) | `array` | Optional | +| [values](#specpluginshelmreleasesvalues) | `array` | Optional | +| [version](#specpluginshelmreleasesversion) | `string` | Optional | ## .spec.plugins.helm.releases.chart @@ -4117,6 +4333,12 @@ The type of tracing to use, either ***none*** or ***tempo*** The chart of the release +## .spec.plugins.helm.releases.disableValidationOnInstall + +### Description + +Disable running `helm diff` validation when installing the plugin, it will still be done when upgrading. 
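To illustrate the plugins schema above, a hedged `furyctl.yaml` fragment installing a single Helm release might look like the following. The chart reference, namespace and values are placeholders, and each `set` entry is assumed to take a `name`/`value` pair.

```yaml
spec:
  plugins:
    helm:
      releases:
        - name: my-app                      # placeholder release name
          namespace: my-app                 # placeholder namespace
          chart: my-repo/my-app             # placeholder chart reference
          version: 1.2.3
          disableValidationOnInstall: true  # skip `helm diff` on first install
          set:
            - name: replicaCount
              value: "2"
```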
+ ## .spec.plugins.helm.releases.name ### Description diff --git a/docs/schemas/onpremises-kfd-v1alpha2.md b/docs/schemas/onpremises-kfd-v1alpha2.md index f620b0661..5ef82f6d0 100644 --- a/docs/schemas/onpremises-kfd-v1alpha2.md +++ b/docs/schemas/onpremises-kfd-v1alpha2.md @@ -2,8 +2,14 @@ This document explains the full schema for the `kind: OnPremises` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules and cluster on premises. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/onpremises-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind OnPremises --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. ## Properties | Property | Type | Required | @@ -13,6 +19,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio | [metadata](#metadata) | `object` | Required | | [spec](#spec) | `object` | Required | +### Description + +A KFD Cluster deployed on top of a set of existing VMs. + ## .apiVersion ### Constraints @@ -29,7 +39,7 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -96,7 +106,7 @@ Common configuration for all the distribution modules. ### Description -The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra` +The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. ## .spec.distribution.common.provider @@ -118,6 +128,8 @@ The provider type. Don't set. FOR INTERNAL USE ONLY. URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`). +NOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too. 
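For the `registry` and `nodeSelector` fields above, a minimal sketch of the common section could be the following; the registry URL and node label value are placeholders. Note that, as stated above, the registry override also applies to plugins that pull from the default registry.

```yaml
spec:
  distribution:
    common:
      registry: registry.example.internal/fury   # placeholder mirror of registry.sighup.io/fury
      nodeSelector:
        node.kubernetes.io/role: infra
```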
+ ## .spec.distribution.common.relativeVendorPath ### Description @@ -149,7 +161,7 @@ An array with the tolerations that will be added to the pods for all the KFD mod ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -167,7 +179,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -214,7 +226,7 @@ The behavior of the configmap ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -422,7 +434,7 @@ The behavior of the secret ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -632,7 +644,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -650,7 +662,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -827,7 +839,7 @@ Set to override the tolerations that will be added to the pods of the Auth modul ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -845,7 +857,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -941,7 +953,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -955,7 +967,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1070,9 +1082,11 @@ The type of the Auth provider, options are: - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them. - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication. +Default is `none`. 
+ ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------| @@ -1135,7 +1149,7 @@ Set to override the tolerations that will be added to the pods of the module. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1153,7 +1167,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1172,9 +1186,11 @@ The value of the toleration The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment. +Default is `none`. + ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -1204,7 +1220,7 @@ The storage backend type for Velero. `minio` will use an in-cluster MinIO deploy ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1291,7 +1307,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1309,7 +1325,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1430,7 +1446,7 @@ The email address to use during the certificate issuing process. ### Description -Name of the clusterIssuer +Name of the clusterIssuer. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers @@ -1446,7 +1462,7 @@ The type of the clusterIssuer. Only `http01` challenge is supported for on-premi ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1486,7 +1502,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1504,7 +1520,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1559,7 +1575,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1577,7 +1593,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1602,7 +1618,7 @@ The value of the toleration ### Description -Configurations for the nginx ingress controller package. +Configurations for the Ingress nginx controller package. ## .spec.distribution.modules.ingress.nginx.overrides @@ -1638,7 +1654,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1656,7 +1672,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1686,7 +1702,7 @@ The provider of the TLS certificates for the ingresses, one of: `none`, `certMan ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -1730,14 +1746,16 @@ The signing key file's content. You can use the `"{file://}"` notation to ### Description -The type of the nginx ingress controller, options are: +The type of the Ingress nginx controller, options are: - `none`: no ingress controller will be installed and no infrastructural ingresses will be created. - `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created. - `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type. +Default is `single`. + ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1799,7 +1817,7 @@ Use this ingress class for the ingress instead of the default one. ### Description -Set to override the node selector used to place the pods of the Ingress module +Set to override the node selector used to place the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations @@ -1814,13 +1832,13 @@ Set to override the node selector used to place the pods of the Ingress module ### Description -Set to override the tolerations that will be added to the pods of the Ingress module +Set to override the tolerations that will be added to the pods of the Ingress module. 
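Putting the Ingress fields documented above together, a hedged example with the dual nginx setup and cert-manager-issued certificates could look like this; the issuer name and email are placeholders.

```yaml
spec:
  distribution:
    modules:
      ingress:
        nginx:
          type: dual
          tls:
            provider: certManager
        certManager:
          clusterIssuer:
            name: letsencrypt-fury      # placeholder issuer name
            email: admin@example.com    # placeholder contact email
            type: http01
```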
## .spec.distribution.modules.ingress.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1838,7 +1856,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1882,7 +1900,7 @@ Configuration for the Logging module. ### Description -DEPRECATED in latest versions of KFD. +DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. ## .spec.distribution.modules.logging.cerebro.overrides @@ -1918,7 +1936,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1936,7 +1954,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2038,7 +2056,7 @@ The storage backend type for Loki. `minio` will use an in-cluster MinIO deployme ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2113,13 +2131,13 @@ The secret access key (password) for the external S3-compatible bucket. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.loki.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.loki.resources.requests @@ -2134,13 +2152,13 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.loki.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.minio @@ -2190,7 +2208,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2208,7 +2226,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2293,7 +2311,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2311,7 +2329,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2346,13 +2364,13 @@ The value of the toleration ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.opensearch.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.opensearch.resources.requests @@ -2367,13 +2385,13 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.opensearch.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.opensearch.storageSize @@ -2389,7 +2407,7 @@ The type of OpenSearch deployment. One of: `single` for a single replica or `tri ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2442,7 +2460,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2460,7 +2478,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2514,7 +2532,7 @@ Set to override the tolerations that will be added to the pods of the module. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2532,7 +2550,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2555,9 +2573,11 @@ Selects the logging stack. Options are: - `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage. - `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. +Default is `opensearch`. + ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------------| @@ -2602,7 +2622,7 @@ Configuration for the Monitoring module. 
### Description -The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io +The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io. ## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules @@ -2658,7 +2678,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2676,7 +2696,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2741,7 +2761,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2759,7 +2779,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2826,7 +2846,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2844,7 +2864,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2880,7 +2900,7 @@ The storage backend type for Mimir. `minio` will use an in-cluster MinIO deploym ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2967,7 +2987,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2985,7 +3005,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3052,7 +3072,7 @@ Set to override the tolerations that will be added to the pods of the package. 
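For the Alertmanager options documented above, a short illustrative fragment follows. The healthchecks.io URL is a placeholder, and the `slackWebhookUrl` field is assumed from the broader Alertmanager configuration rather than described in this excerpt.

```yaml
spec:
  distribution:
    modules:
      monitoring:
        alertmanager:
          installDefaultRules: true
          # placeholder dead man's switch receiver, e.g. a healthchecks.io check
          deadManSwitchWebhookUrl: https://hc-ping.com/00000000-0000-0000-0000-000000000000
          # assumed field, placeholder URL for the default alert notifications
          slackWebhookUrl: https://hooks.slack.com/services/XXX/YYY/ZZZ
```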
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3070,7 +3090,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3151,7 +3171,7 @@ Set to override the tolerations that will be added to the pods of the module. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3169,7 +3189,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3224,13 +3244,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests @@ -3245,13 +3265,13 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.prometheus.retentionSize @@ -3310,13 +3330,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests @@ -3331,13 +3351,13 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.type @@ -3350,9 +3370,11 @@ The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or ` - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. +Default is `prometheus`. 
+ ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------| @@ -3403,7 +3425,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3421,7 +3443,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3499,7 +3521,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3517,7 +3539,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3587,7 +3609,7 @@ Set to override the tolerations that will be added to the pods of the module. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3605,7 +3627,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3660,7 +3682,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3678,7 +3700,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3699,7 +3721,7 @@ The type of CNI plugin to use, either `calico` (default, via the Tigera Operator ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3750,7 +3772,7 @@ The default enforcement action to use for the included constraints. `deny` will ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3798,7 +3820,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3816,7 +3838,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3890,7 +3912,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3908,7 +3930,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3929,7 +3951,7 @@ The validation failure action to use for the policies, `Enforce` will block when ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -3977,7 +3999,7 @@ Set to override the tolerations that will be added to the pods of the module. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3995,7 +4017,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4014,9 +4036,11 @@ The value of the toleration The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. +Default is `none`. + ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -4087,7 +4111,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4105,7 +4129,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4186,7 +4210,7 @@ Set to override the tolerations that will be added to the pods of the module. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4204,7 +4228,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4240,7 +4264,7 @@ The storage backend type for Tempo. `minio` will use an in-cluster MinIO deploym ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4327,7 +4351,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4345,7 +4369,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4370,9 +4394,11 @@ The retention time for the traces stored in Tempo. The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment. +Default is `tempo`. + ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------| @@ -4383,7 +4409,7 @@ The type of tracing to use, either `none` or `tempo`. `none` will disable the Tr ### Description -Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: v1.30.1. +Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`. ### Constraints @@ -5004,7 +5030,7 @@ Name for the node group. It will be also used as the node role label. It should ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -5150,14 +5176,15 @@ The subnet CIDR to use for the Services network. 
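+For reference, below is a sketch of a Helm release plugin definition in `furyctl.yaml`. The release name, namespace, chart reference and version are only placeholders; the actual fields (including the `disableValidationOnInstall` flag) are documented below:
+
+```yaml
+spec:
+  plugins:
+    helm:
+      releases:
+        - name: my-release
+          namespace: my-namespace
+          chart: my-repo/my-chart
+          version: "1.2.3"
+          disableValidationOnInstall: true
+```
+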
### Properties -| Property | Type | Required | -|:-----------------------------------------------|:---------|:---------| -| [chart](#specpluginshelmreleaseschart) | `string` | Required | -| [name](#specpluginshelmreleasesname) | `string` | Required | -| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | -| [set](#specpluginshelmreleasesset) | `array` | Optional | -| [values](#specpluginshelmreleasesvalues) | `array` | Optional | -| [version](#specpluginshelmreleasesversion) | `string` | Optional | +| Property | Type | Required | +|:---------------------------------------------------------------------------------|:----------|:---------| +| [chart](#specpluginshelmreleaseschart) | `string` | Required | +| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional | +| [name](#specpluginshelmreleasesname) | `string` | Required | +| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | +| [set](#specpluginshelmreleasesset) | `array` | Optional | +| [values](#specpluginshelmreleasesvalues) | `array` | Optional | +| [version](#specpluginshelmreleasesversion) | `string` | Optional | ## .spec.plugins.helm.releases.chart @@ -5165,6 +5192,12 @@ The subnet CIDR to use for the Services network. The chart of the release +## .spec.plugins.helm.releases.disableValidationOnInstall + +### Description + +Disable running `helm diff` validation when installing the plugin, it will still be done when upgrading. + ## .spec.plugins.helm.releases.name ### Description diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index 4ea507871..2eca84566 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -8,7 +8,7 @@ import ( "reflect" ) -// A Fury Cluster deployed through AWS's Elastic Kubernetes Service +// A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS). type EksclusterKfdV1Alpha2 struct { // ApiVersion corresponds to the JSON schema field "apiVersion". ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` @@ -28,7 +28,8 @@ type EksclusterKfdV1Alpha2Kind string const EksclusterKfdV1Alpha2KindEKSCluster EksclusterKfdV1Alpha2Kind = "EKSCluster" type Metadata struct { - // Name corresponds to the JSON schema field "name". + // The name of the cluster. It will also be used as a prefix for all the other + // resources created. Name string `json:"name" yaml:"name" mapstructure:"name"` } @@ -36,7 +37,9 @@ type Spec struct { // Distribution corresponds to the JSON schema field "distribution". Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` - // DistributionVersion corresponds to the JSON schema field "distributionVersion". + // Defines which KFD version will be installed and, in consequence, the Kubernetes + // version used to create the cluster. It supports git tags and branches. Example: + // `v1.30.1`. DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` // Infrastructure corresponds to the JSON schema field "infrastructure". @@ -48,14 +51,15 @@ type Spec struct { // Plugins corresponds to the JSON schema field "plugins". Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` - // Region corresponds to the JSON schema field "region". 
+ // Defines in which AWS region the cluster and all the related resources will be + // created. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` // This map defines which will be the common tags that will be added to all the // resources created on AWS. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // ToolsConfiguration corresponds to the JSON schema field "toolsConfiguration". + // Configuration for tools used by furyctl, like Terraform. ToolsConfiguration SpecToolsConfiguration `json:"toolsConfiguration" yaml:"toolsConfiguration" mapstructure:"toolsConfiguration"` } @@ -70,29 +74,35 @@ type SpecDistribution struct { Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` } +// Common configuration for all the distribution modules. type SpecDistributionCommon struct { - // The node selector to use to place the pods for all the KFD modules + // The node selector to use to place the pods for all the KFD modules. Follows + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` // URL of the registry where to pull images from for the Distribution phase. - // (Default is registry.sighup.io/fury). - // - // NOTE: If plugins are pulling from the default registry, the registry will be - // replaced for these plugins too. + // (Default is `registry.sighup.io/fury`). Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - // The relative path to the vendor directory, does not need to be changed + // The relative path to the vendor directory, does not need to be changed. RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` - // The tolerations that will be added to the pods for all the KFD modules + // An array with the tolerations that will be added to the pods for all the KFD + // modules. Follows Kubernetes tolerations format. Example: + // + // ```yaml + // - effect: NoSchedule + // key: node.kubernetes.io/role + // value: infra + // ``` Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionCommonProvider struct { - // The type of the provider, must be EKS if specified + // The provider type. Don't set. FOR INTERNAL USE ONLY. Type string `json:"type" yaml:"type" mapstructure:"type"` } @@ -294,8 +304,11 @@ type SpecDistributionModules struct { Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` } +// Configuration for the Auth module. type SpecDistributionModulesAuth struct { - // The base domain for the auth module + // Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, + // Dex). Notice that when nginx type is dual, these will use the `external` + // ingress class. BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` // Dex corresponds to the JSON schema field "dex". 
@@ -311,11 +324,25 @@ type SpecDistributionModulesAuth struct { Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"` } +// Configuration for the Dex package. type SpecDistributionModulesAuthDex struct { - // The additional static clients for dex + // Additional static clients defitions that will be added to the default clients + // included with the distribution in Dex's configuration. Example: + // + // ```yaml + // additionalStaticClients: + // - id: my-custom-client + // name: "A custom additional static client" + // redirectURIs: + // - "https://myapp.tld/redirect" + // - "https://alias.tld/oidc-callback" + // secret: supersecretpassword + // ``` + // Reference: https://dexidp.io/docs/connectors/local/ AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"` - // The connectors for dex + // A list with each item defining a Dex connector. Follows Dex connectors + // configuration format: https://dexidp.io/docs/connectors/ Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"` // Expiry corresponds to the JSON schema field "expiry". @@ -333,25 +360,29 @@ type SpecDistributionModulesAuthDexExpiry struct { SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"` } +// Override the common configuration with a particular configuration for the Auth +// module. type SpecDistributionModulesAuthOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". + // Override the definition of the Auth module ingresses. Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // The node selector to use to place the pods for the auth module + // Set to override the node selector used to place the pods of the Auth module. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The tolerations that will be added to the pods for the auth module + // Set to override the tolerations that will be added to the pods of the Auth + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionModulesAuthOverridesIngress struct { - // The host of the ingress + // Use this host for the ingress instead of the default one. Host string `json:"host" yaml:"host" mapstructure:"host"` - // The ingress class of the ingress + // Use this ingress class for the ingress instead of the default one. IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } +// Override the definition of the Auth module ingresses. type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress type SpecDistributionModulesAuthPomerium interface{} @@ -476,15 +507,23 @@ type SpecDistributionModulesAuthProvider struct { // BasicAuth corresponds to the JSON schema field "basicAuth". 
BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` - // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** + // The type of the Auth provider, options are: + // - `none`: will disable authentication in the infrastructural ingresses. + // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) + // and require authentication before accessing them. + // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth + // (username and password) authentication. + // + // Default is `none`. Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the HTTP Basic Auth provider. type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth + // The password for logging in with the HTTP basic authentication. Password string `json:"password" yaml:"password" mapstructure:"password"` - // The username for the basic auth + // The username for logging in with the HTTP basic authentication. Username string `json:"username" yaml:"username" mapstructure:"username"` } @@ -544,11 +583,16 @@ type SpecDistributionModulesAwsLoadBalancerController struct { Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Disaster Recovery module. type SpecDistributionModulesDr struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of the DR, must be ***none*** or ***eks*** + // The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the + // module and `eks` will install Velero and use an S3 bucket to store the + // backups. + // + // Default is `none`. Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` // Velero corresponds to the JSON schema field "velero". @@ -574,13 +618,13 @@ type SpecDistributionModulesDrVelero struct { } type SpecDistributionModulesDrVeleroEks struct { - // The name of the velero bucket + // The name of the bucket for Velero. BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` // IamRoleArn corresponds to the JSON schema field "iamRoleArn". IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` - // The region where the velero bucket is located + // The region where the bucket for Velero will be located. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` } @@ -610,12 +654,15 @@ type SpecDistributionModulesDrVeleroSchedulesCron struct { } type SpecDistributionModulesIngress struct { - // the base domain used for all the KFD ingresses, if in the nginx dual - // configuration, it should be the same as the - // .spec.distribution.modules.ingress.dns.private.name zone + // The base domain used for all the KFD ingresses. If in the nginx `dual` + // configuration type, this value should be the same as the + // `.spec.distribution.modules.ingress.dns.private.name` zone. BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` - // CertManager corresponds to the JSON schema field "certManager". + // Configuration for the cert-manager package. 
Required even if + // `ingress.nginx.type` is `none`, cert-manager is used for managing other + // certificates in the cluster besides the TLS termination certificates for the + // ingresses. CertManager SpecDistributionModulesIngressCertManager `json:"certManager" yaml:"certManager" mapstructure:"certManager"` // Dns corresponds to the JSON schema field "dns". @@ -627,13 +674,17 @@ type SpecDistributionModulesIngress struct { // Forecastle corresponds to the JSON schema field "forecastle". Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` - // Configurations for the nginx ingress controller module + // Configurations for the Ingress nginx controller package. Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager package. Required even if +// `ingress.nginx.type` is `none`, cert-manager is used for managing other +// certificates in the cluster besides the TLS termination certificates for the +// ingresses. type SpecDistributionModulesIngressCertManager struct { // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` @@ -642,20 +693,24 @@ type SpecDistributionModulesIngressCertManager struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager's ACME clusterIssuer used to request +// certificates from Let's Encrypt. type SpecDistributionModulesIngressCertManagerClusterIssuer struct { - // The email of the cluster issuer + // The email address to use during the certificate issuing process. Email string `json:"email" yaml:"email" mapstructure:"email"` - // The name of the cluster issuer + // Name of the clusterIssuer. Name string `json:"name" yaml:"name" mapstructure:"name"` // Route53 corresponds to the JSON schema field "route53". Route53 SpecDistributionModulesIngressClusterIssuerRoute53 `json:"route53" yaml:"route53" mapstructure:"route53"` - // The custom solvers configurations + // List of challenge solvers to use instead of the default one for the `http01` + // challenge. Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` - // The type of the cluster issuer, must be ***dns01*** or ***http01*** + // The type of the clusterIssuer, must be `dns01` for using DNS challenge or + // `http01` for using HTTP challenge. Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } @@ -677,6 +732,8 @@ type SpecDistributionModulesIngressClusterIssuerRoute53 struct { Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` } +// DNS definition, used in conjunction with `externalDNS` package to automate DNS +// management and certificates emission. type SpecDistributionModulesIngressDNS struct { // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -688,11 +745,14 @@ type SpecDistributionModulesIngressDNS struct { Public SpecDistributionModulesIngressDNSPublic `json:"public" yaml:"public" mapstructure:"public"` } +// The private DNS zone is used only when `ingress.nginx.type` is `dual`, for +// exposing infrastructural services only in the private DNS zone. type SpecDistributionModulesIngressDNSPrivate struct { - // If true, the private hosted zone will be created + // By default, a Terraform data source will be used to get the private DNS zone. + // Set to `true` to create the private zone instead. Create bool `json:"create" yaml:"create" mapstructure:"create"` - // The name of the private hosted zone + // The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`. Name string `json:"name" yaml:"name" mapstructure:"name"` // VpcId corresponds to the JSON schema field "vpcId". @@ -700,10 +760,11 @@ type SpecDistributionModulesIngressDNSPrivate struct { } type SpecDistributionModulesIngressDNSPublic struct { - // If true, the public hosted zone will be created + // By default, a Terraform data source will be used to get the public DNS zone. + // Set to `true` to create the public zone instead. Create bool `json:"create" yaml:"create" mapstructure:"create"` - // The name of the public hosted zone + // The name of the public hosted zone. Name string `json:"name" yaml:"name" mapstructure:"name"` } @@ -727,14 +788,24 @@ type SpecDistributionModulesIngressNginx struct { // Tls corresponds to the JSON schema field "tls". Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` - // The type of the nginx ingress controller, must be ***none***, ***single*** or - // ***dual*** + // The type of the Ingress nginx controller, options are: + // - `none`: no ingress controller will be installed and no infrastructural + // ingresses will be created. + // - `single`: a single ingress controller with ingress class `nginx` will be + // installed to manage all the ingress resources, infrastructural ingresses will + // be created. + // - `dual`: two independent ingress controllers will be installed, one for the + // `internal` ingress class intended for private ingresses and one for the + // `external` ingress class intended for public ingresses. KFD infrastructural + // ingresses wil use the `internal` ingress class when using the dual type. + // + // Default is `single`. Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` } type SpecDistributionModulesIngressNginxTLS struct { - // The provider of the TLS certificate, must be ***none***, ***certManager*** or - // ***secret*** + // The provider of the TLS certificates for the ingresses, one of: `none`, + // `certManager`, or `secret`. Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"` // Secret corresponds to the JSON schema field "secret". @@ -749,15 +820,18 @@ const ( SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret" ) +// Kubernetes TLS secret for the ingresses TLS certificate. type SpecDistributionModulesIngressNginxTLSSecret struct { - // Ca corresponds to the JSON schema field "ca". + // The Certificate Authority certificate file's content. You can use the + // `"{file://}"` notation to get the content from a file. 
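+	//
+	// For illustration, assuming the certificate files sit next to `furyctl.yaml`,
+	// the whole `secret` block could be filled using the file notation like this
+	// (paths are placeholders):
+	//
+	// ```yaml
+	// secret:
+	//   ca: "{file://secrets/ca.crt}"
+	//   cert: "{file://secrets/tls.crt}"
+	//   key: "{file://secrets/tls.key}"
+	// ```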
 	Ca string `json:"ca" yaml:"ca" mapstructure:"ca"`
 
-	// The certificate file content or you can use the file notation to get the
-	// content from a file
+	// The certificate file's content. You can use the `"{file://}"` notation to
+	// get the content from a file.
 	Cert string `json:"cert" yaml:"cert" mapstructure:"cert"`
 
-	// Key corresponds to the JSON schema field "key".
+	// The signing key file's content. You can use the `"{file://}"` notation to
+	// get the content from a file.
 	Key string `json:"key" yaml:"key" mapstructure:"key"`
 }
 
@@ -769,14 +843,17 @@ const (
 	SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single"
 )
 
+// Override the common configuration with a particular configuration for the
+// Ingress module.
 type SpecDistributionModulesIngressOverrides struct {
 	// Ingresses corresponds to the JSON schema field "ingresses".
 	Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
 
-	// The node selector to use to place the pods for the ingress module
+	// Set to override the node selector used to place the pods of the Ingress module.
 	NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
 
-	// The tolerations that will be added to the pods for the ingress module
+	// Set to override the tolerations that will be added to the pods of the Ingress
+	// module.
 	Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
 }
 
@@ -785,6 +862,7 @@ type SpecDistributionModulesIngressOverridesIngresses struct {
 	Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`
 }
 
+// Configuration for the Logging module.
 type SpecDistributionModulesLogging struct {
 	// Cerebro corresponds to the JSON schema field "cerebro".
 	Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"`
@@ -807,79 +885,87 @@ type SpecDistributionModulesLogging struct {
 	// Overrides corresponds to the JSON schema field "overrides".
 	Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
 
-	// selects the logging stack. Choosing none will disable the centralized logging.
-	// Choosing opensearch will deploy and configure the Logging Operator and an
+	// Selects the logging stack. Options are:
+	// - `none`: will disable the centralized logging.
+	// - `opensearch`: will deploy and configure the Logging Operator and an
 	// OpenSearch cluster (can be single or triple for HA) where the logs will be
-	// stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh
-	// for storage. Choosing customOuput the Logging Operator will be deployed and
-	// installed but with no local storage, you will have to create the needed Outputs
-	// and ClusterOutputs to ship the logs to your desired storage.
+	// stored.
+	// - `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.
+	// - `customOutputs`: the Logging Operator will be deployed and installed but with
+	// no local storage, you will have to create the needed Outputs and ClusterOutputs
+	// to ship the logs to your desired storage.
+	//
+	// Default is `opensearch`. 
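+	//
+	// For reference, a minimal sketch of the corresponding `furyctl.yaml` section
+	// (values are only illustrative, not defaults) could be:
+	//
+	// ```yaml
+	// logging:
+	//   type: loki
+	//   loki:
+	//     backend: minio
+	// ```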
Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` } +// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. type SpecDistributionModulesLoggingCerebro struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -// when using the customOutputs logging type, you need to manually specify the spec -// of the several Output and ClusterOutputs that the Logging Operator expects to -// forward the logs collected by the pre-defined flows. +// When using the `customOutputs` logging type, you need to manually specify the +// spec of the several `Output` and `ClusterOutputs` that the Logging Operator +// expects to forward the logs collected by the pre-defined flows. type SpecDistributionModulesLoggingCustomOutputs struct { - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `audit` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `errors` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `events` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Events string `json:"events" yaml:"events" mapstructure:"events"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `infra` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. 
Use the nullout output to discard the - // flow. + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdEtcd` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` } +// Configuration for the Loki package. type SpecDistributionModulesLoggingLoki struct { - // Backend corresponds to the JSON schema field "backend". + // The storage backend type for Loki. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Loki's external storage backend. ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Resources corresponds to the JSON schema field "resources". @@ -893,23 +979,25 @@ const ( SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" ) +// Configuration for Loki's external storage backend. 
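+//
+// A sketch of how the `externalEndpoint` block could look like in `furyctl.yaml`
+// (endpoint, bucket and credentials below are placeholders):
+//
+// ```yaml
+// externalEndpoint:
+//   endpoint: s3.example.internal
+//   bucketName: loki-data
+//   accessKeyId: example-access-key
+//   secretAccessKey: example-secret-key
+//   insecure: false
+// ```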
type SpecDistributionModulesLoggingLokiExternalEndpoint struct { - // The access key id of the loki external endpoint + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the loki external endpoint + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the loki external endpoint + // External S3-compatible endpoint for Loki's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the loki external endpoint will be insecure + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the loki external endpoint + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for Logging's MinIO deployment. type SpecDistributionModulesLoggingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -917,15 +1005,15 @@ type SpecDistributionModulesLoggingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The PVC size for each minio disk, 6 disks total + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesLoggingMinioRootUser struct { - // The password of the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username of the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -936,10 +1024,11 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the opensearch pods + // The storage size for the OpenSearch volumes. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` - // The type of the opensearch, must be ***single*** or ***triple*** + // The type of OpenSearch deployment. One of: `single` for a single replica or + // `triple` for an HA 3-replicas deployment. Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` } @@ -950,6 +1039,7 @@ const ( SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" ) +// Configuration for the Logging Operator. 
type SpecDistributionModulesLoggingOperator struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -964,7 +1054,7 @@ const ( SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" ) -// configuration for the Monitoring module components +// Configuration for the Monitoring module. type SpecDistributionModulesMonitoring struct { // Alertmanager corresponds to the JSON schema field "alertmanager". Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` @@ -993,12 +1083,12 @@ type SpecDistributionModulesMonitoring struct { // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` - // The type of the monitoring, must be ***none***, ***prometheus***, - // ***prometheusAgent*** or ***mimir***. + // The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or + // `mimir`. // // - `none`: will disable the whole monitoring stack. // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus - // instance, Alertmanager, a set of alert rules, exporters needed to monitor all + // instace, Alertmanager, a set of alert rules, exporters needed to monitor all // the components of the cluster, Grafana and a series of dashboards to view the // collected metrics, and more. // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus @@ -1006,9 +1096,10 @@ type SpecDistributionModulesMonitoring struct { // needed to get metrics for the status of the cluster and the workloads. Useful // when having a centralized (remote) Prometheus where to ship the metrics and not // storing them locally in the cluster. - // - `mimir`: will install the same as the `prometheus` option, and in addition - // Grafana Mimir that allows for longer retention of metrics and the usage of - // Object Storage. + // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir + // that allows for longer retention of metrics and the usage of Object Storage. + // + // Default is `prometheus`. Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` // X509Exporter corresponds to the JSON schema field "x509Exporter". @@ -1016,14 +1107,15 @@ type SpecDistributionModulesMonitoring struct { } type SpecDistributionModulesMonitoringAlertManager struct { - // The webhook url to send deadman switch monitoring, for example to use with - // healthchecks.io + // The webhook URL to send dead man's switch monitoring, for example to use with + // healthchecks.io. DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` - // If true, the default rules will be installed + // Set to false to avoid installing the Prometheus rules (alerts) included with + // the distribution. InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` - // The slack webhook url to send alerts + // The Slack webhook URL where to send the infrastructural and workload alerts to. 
SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` } @@ -1062,17 +1154,22 @@ type SpecDistributionModulesMonitoringKubeStateMetrics struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Mimir package. type SpecDistributionModulesMonitoringMimir struct { - // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** + // The storage backend type for Mimir. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Mimir's external storage backend. ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The retention time for the mimir pods + // The retention time for the logs stored in Mimir. Default is `30d`. Value must + // match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 + // days. RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` } @@ -1083,23 +1180,25 @@ const ( SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" ) +// Configuration for Mimir's external storage backend. type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { - // The access key id of the external mimir backend + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the external mimir backend + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the external mimir backend + // External S3-compatible endpoint for Mimir's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the external mimir backend will not use tls + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the external mimir backend + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for Monitoring's MinIO deployment. type SpecDistributionModulesMonitoringMinio struct { // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1107,15 +1206,15 @@ type SpecDistributionModulesMonitoringMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The storage size for the minio pods + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesMonitoringMinioRootUser struct { - // The password for the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username for the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -1132,13 +1231,13 @@ type SpecDistributionModulesMonitoringPrometheus struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The retention size for the k8s Prometheus instance. + // The retention size for the `k8s` Prometheus instance. RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - // The retention time for the k8s Prometheus instance. + // The retention time for the `k8s` Prometheus instance. RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - // The storage size for the k8s Prometheus instance. + // The storage size for the `k8s` Prometheus instance. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } @@ -1174,9 +1273,10 @@ type SpecDistributionModulesMonitoringX509Exporter struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Networking module. type SpecDistributionModulesNetworking struct { // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` // TigeraOperator corresponds to the JSON schema field "tigeraOperator". TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` @@ -1194,6 +1294,7 @@ type SpecDistributionModulesNetworkingType string const SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" +// Configuration for the Policy module. type SpecDistributionModulesPolicy struct { // Gatekeeper corresponds to the JSON schema field "gatekeeper". Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` @@ -1204,20 +1305,27 @@ type SpecDistributionModulesPolicy struct { // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of security to use, either ***none***, ***gatekeeper*** or - // ***kyverno*** + // The type of policy enforcement to use, either `none`, `gatekeeper` or + // `kyverno`. + // + // Default is `none`. Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the Gatekeeper package. type SpecDistributionModulesPolicyGatekeeper struct { // This parameter adds namespaces to Gatekeeper's exemption list, so it will not // enforce the constraints on them. AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - // The enforcement action to use for the gatekeeper module + // The default enforcement action to use for the included constraints. `deny` will + // block the admission when violations to the policies are found, `warn` will show + // a message to the user but will admit the violating requests and `dryrun` won't + // give any feedback to the user but it will log the violations. EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - // If true, the default policies will be installed + // Set to `false` to avoid installing the default Gatekeeper policies (constraints + // templates and constraints) included with the distribution. InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` // Overrides corresponds to the JSON schema field "overrides". @@ -1232,18 +1340,22 @@ const ( SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" ) +// Configuration for the Kyverno package. type SpecDistributionModulesPolicyKyverno struct { // This parameter adds namespaces to Kyverno's exemption list, so it will not - // enforce the constraints on them. + // enforce the policies on them. AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - // If true, the default policies will be installed + // Set to `false` to avoid installing the default Kyverno policies included with + // distribution. InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The validation failure action to use for the kyverno module + // The validation failure action to use for the policies, `Enforce` will block + // when a request does not comply with the policies and `Audit` will not block but + // log when a request does not comply with the policies. ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` } @@ -1262,6 +1374,7 @@ const ( SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" ) +// Configuration for the Tracing module. 
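+//
+// For reference, an illustrative tracing section of `furyctl.yaml` (the retention
+// value shown is just an example) could be:
+//
+// ```yaml
+// tracing:
+//   type: tempo
+//   tempo:
+//     backend: minio
+//     retentionTime: 720h
+// ```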
type SpecDistributionModulesTracing struct { // Minio corresponds to the JSON schema field "minio". Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` @@ -1272,10 +1385,14 @@ type SpecDistributionModulesTracing struct { // Tempo corresponds to the JSON schema field "tempo". Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` - // The type of tracing to use, either ***none*** or ***tempo*** + // The type of tracing to use, either `none` or `tempo`. `none` will disable the + // Tracing module and `tempo` will install a Grafana Tempo deployment. + // + // Default is `tempo`. Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for Tracing's MinIO deployment. type SpecDistributionModulesTracingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1283,29 +1400,32 @@ type SpecDistributionModulesTracingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The storage size for the minio pods + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username for the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } +// Configuration for the Tempo package. type SpecDistributionModulesTracingTempo struct { - // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** + // The storage backend type for Tempo. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Tempo's external storage backend. ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The retention time for the tempo pods + // The retention time for the traces stored in Tempo. RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` } @@ -1316,20 +1436,21 @@ const ( SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" ) +// Configuration for Tempo's external storage backend. 
 type SpecDistributionModulesTracingTempoExternalEndpoint struct {
-	// The access key id of the external tempo backend
+	// The access key ID (username) for the external S3-compatible bucket.
 	AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
 
-	// The bucket name of the external tempo backend
+	// The bucket name of the external S3-compatible object storage.
 	BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
 
-	// The endpoint of the external tempo backend
+	// External S3-compatible endpoint for Tempo's storage.
 	Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
 
-	// If true, the external tempo backend will not use tls
+	// If true, will use HTTP as protocol instead of HTTPS.
 	Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
 
-	// The secret access key of the external tempo backend
+	// The secret access key (password) for the external S3-compatible bucket.
 	SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
 }
 
@@ -1341,88 +1462,98 @@ const (
 )
 
 type SpecInfrastructure struct {
-	// This key defines the VPC that will be created in AWS
+	// Vpc corresponds to the JSON schema field "vpc".
 	Vpc *SpecInfrastructureVpc `json:"vpc,omitempty" yaml:"vpc,omitempty" mapstructure:"vpc,omitempty"`
 
-	// This section defines the creation of VPN bastions
+	// Vpn corresponds to the JSON schema field "vpn".
 	Vpn *SpecInfrastructureVpn `json:"vpn,omitempty" yaml:"vpn,omitempty" mapstructure:"vpn,omitempty"`
 }
 
+// Configuration for the VPC that will be created to host the EKS cluster and its
+// related resources. If you already have a VPC that you want to use, leave this
+// section empty and use `.spec.kubernetes.vpcId` instead.
 type SpecInfrastructureVpc struct {
 	// Network corresponds to the JSON schema field "network".
 	Network SpecInfrastructureVpcNetwork `json:"network" yaml:"network" mapstructure:"network"`
 }
 
 type SpecInfrastructureVpcNetwork struct {
-	// This is the CIDR of the VPC that will be created
+	// The network CIDR for the VPC that will be created
 	Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"`
 
 	// SubnetsCidrs corresponds to the JSON schema field "subnetsCidrs".
 	SubnetsCidrs SpecInfrastructureVpcNetworkSubnetsCidrs `json:"subnetsCidrs" yaml:"subnetsCidrs" mapstructure:"subnetsCidrs"`
 }
 
+// Network CIDRs configuration for private and public subnets.
 type SpecInfrastructureVpcNetworkSubnetsCidrs struct {
-	// These are the CIRDs for the private subnets, where the nodes, the pods, and the
+	// Network CIDRs for the private subnets, where the nodes, the pods, and the
 	// private load balancers will be created
 	Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"`
 
-	// These are the CIDRs for the public subnets, where the public load balancers and
-	// the VPN servers will be created
+	// Network CIDRs for the public subnets, where the public load balancers and the
+	// VPN servers will be created
 	Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"`
 }
 
+// Configuration for the VPN server instances. 
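+//
+// A minimal example, where every value shown is only illustrative:
+//
+// ```yaml
+// vpn:
+//   instances: 1
+//   instanceType: t3.micro
+//   diskSize: 50
+//   port: 1194
+//   dhParamsBits: 2048
+//   operatorName: sighup
+//   vpnClientsSubnetCidr: 172.16.0.0/16
+//   ssh:
+//     githubUsersName:
+//       - your-github-username
+//     allowedFromCidrs:
+//       - 0.0.0.0/0
+// ```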
 type SpecInfrastructureVpn struct {
-	// This value defines the prefix that will be used to create the bucket name where
-	// the VPN servers will store the states
+	// This value defines the prefix for the bucket name where the VPN servers will
+	// store their state (VPN certificates, users).
 	BucketNamePrefix *TypesAwsS3BucketNamePrefix `json:"bucketNamePrefix,omitempty" yaml:"bucketNamePrefix,omitempty" mapstructure:"bucketNamePrefix,omitempty"`
 
-	// The dhParamsBits size used for the creation of the .pem file that will be used
-	// in the dh openvpn server.conf file
+	// The `dhParamsBits` size used for the creation of the .pem file that will be
+	// used in the dh openvpn server.conf file.
 	DhParamsBits *int `json:"dhParamsBits,omitempty" yaml:"dhParamsBits,omitempty" mapstructure:"dhParamsBits,omitempty"`
 
-	// The size of the disk in GB
+	// The size of the disk in GB for each VPN server. Example: entering `50` will
+	// create disks of 50 GB.
 	DiskSize *int `json:"diskSize,omitempty" yaml:"diskSize,omitempty" mapstructure:"diskSize,omitempty"`
 
-	// Overrides the default IAM user name for the VPN
+	// Overrides the IAM user name for the VPN. Default is to use the cluster name.
 	IamUserNameOverride *TypesAwsIamRoleName `json:"iamUserNameOverride,omitempty" yaml:"iamUserNameOverride,omitempty" mapstructure:"iamUserNameOverride,omitempty"`
 
-	// The size of the AWS EC2 instance
+	// The type of the AWS EC2 instance for each VPN server. Follows AWS EC2
+	// nomenclature. Example: `t3.micro`.
 	InstanceType *string `json:"instanceType,omitempty" yaml:"instanceType,omitempty" mapstructure:"instanceType,omitempty"`
 
-	// The number of instances to create, 0 to skip the creation
+	// The number of VPN server instances to create, `0` to skip the creation.
 	Instances *int `json:"instances,omitempty" yaml:"instances,omitempty" mapstructure:"instances,omitempty"`
 
-	// The username of the account to create in the bastion's operating system
+	// The username of the account to create in the bastion's operating system.
 	OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"`
 
-	// The port used by the OpenVPN server
+	// The port where each OpenVPN server will listen for connections.
 	Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"`
 
 	// Ssh corresponds to the JSON schema field "ssh".
 	Ssh SpecInfrastructureVpnSsh `json:"ssh" yaml:"ssh" mapstructure:"ssh"`
 
-	// The VPC ID where the VPN servers will be created, required only if
-	// .spec.infrastructure.vpc is omitted
+	// The ID of the VPC where the VPN server instances will be created, required only
+	// if `.spec.infrastructure.vpc` is omitted.
 	VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"`
 
-	// The CIDR that will be used to assign IP addresses to the VPN clients when
-	// connected
+	// The network CIDR that will be used to assign IP addresses to the VPN clients
+	// when connected.
 	VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"`
 }
 
 type SpecInfrastructureVpnSsh struct {
-	// The CIDR enabled in the security group that can access the bastions in SSH
+	// The network CIDR enabled in the security group to access the VPN servers
+	// (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source.
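+	// Example (illustrative values): `["10.0.0.0/8", "192.168.1.0/24"]`.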
 	AllowedFromCidrs []TypesCidr `json:"allowedFromCidrs" yaml:"allowedFromCidrs" mapstructure:"allowedFromCidrs"`
 
-	// The github user name list that will be used to get the ssh public key that will
-	// be added as authorized key to the operatorName user
+	// List of GitHub usernames whose SSH public keys will be fetched and added as
+	// authorized keys for the `operatorName` user.
 	GithubUsersName []string `json:"githubUsersName" yaml:"githubUsersName" mapstructure:"githubUsersName"`
 
-	// This value defines the public keys that will be added to the bastion's
-	// operating system NOTES: Not yet implemented
+	// **NOT IN USE**, use `githubUsersName` instead. This value defines the public
+	// keys that will be added to the bastion's operating system.
 	PublicKeys []interface{} `json:"publicKeys,omitempty" yaml:"publicKeys,omitempty" mapstructure:"publicKeys,omitempty"`
 }
 
+// Defines the Kubernetes components configuration and the values needed for the
+// `kubernetes` phase of furyctl.
 type SpecKubernetes struct {
 	// ApiServer corresponds to the JSON schema field "apiServer".
 	ApiServer SpecKubernetesAPIServer `json:"apiServer" yaml:"apiServer" mapstructure:"apiServer"`
 
@@ -1430,71 +1561,81 @@ type SpecKubernetes struct {
 	// AwsAuth corresponds to the JSON schema field "awsAuth".
 	AwsAuth *SpecKubernetesAwsAuth `json:"awsAuth,omitempty" yaml:"awsAuth,omitempty" mapstructure:"awsAuth,omitempty"`
 
-	// Overrides the default IAM role name prefix for the EKS cluster
+	// Overrides the default prefix for the IAM role name of the EKS cluster. If not
+	// set, a name will be generated from the cluster name.
 	ClusterIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"clusterIAMRoleNamePrefixOverride,omitempty" yaml:"clusterIAMRoleNamePrefixOverride,omitempty" mapstructure:"clusterIAMRoleNamePrefixOverride,omitempty"`
 
-	// Optional Kubernetes Cluster log retention in days. Defaults to 90 days.
-	LogRetentionDays *int `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"`
+	// Optional Kubernetes Cluster log retention in CloudWatch, expressed in days.
+	// Setting the value to zero (`0`) makes retention last forever. Default is `90`
+	// days.
+	LogRetentionDays *SpecKubernetesLogRetentionDays `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"`
 
 	// Optional list of Kubernetes Cluster log types to enable. Defaults to all types.
 	LogsTypes []SpecKubernetesLogsTypesElem `json:"logsTypes,omitempty" yaml:"logsTypes,omitempty" mapstructure:"logsTypes,omitempty"`
 
-	// This key contains the ssh public key that can connect to the nodes via SSH
-	// using the ec2-user user
+	// The SSH public key that can connect to the nodes via SSH using the `ec2-user`
+	// user. Example: the contents of your `~/.ssh/id_rsa.pub` file.
 	NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"`
 
 	// NodePools corresponds to the JSON schema field "nodePools".
 	NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"`
 
-	// Either `launch_configurations`, `launch_templates` or `both`. For new clusters
-	// use `launch_templates`, for existing cluster you'll need to migrate from
-	// `launch_configurations` to `launch_templates` using `both` as interim.
+	// Accepted values are `launch_configurations`, `launch_templates` or `both`. For
+	// new clusters use `launch_templates`; for adopting an existing cluster you'll
+	// need to migrate from `launch_configurations` to `launch_templates` using
+	// `both` as an interim step.
 	NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"`
 
-	// This value defines the CIDR that will be used to assign IP addresses to the
-	// services
+	// This value defines the network CIDR that will be used to assign IP addresses to
+	// Kubernetes services.
 	ServiceIpV4Cidr *TypesCidr `json:"serviceIpV4Cidr,omitempty" yaml:"serviceIpV4Cidr,omitempty" mapstructure:"serviceIpV4Cidr,omitempty"`
 
-	// This value defines the subnet IDs where the EKS cluster will be created,
-	// required only if .spec.infrastructure.vpc is omitted
+	// Required only if `.spec.infrastructure.vpc` is omitted. This value defines the
+	// IDs of the subnets where the EKS cluster will be created.
 	SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"`
 
-	// This value defines the VPC ID where the EKS cluster will be created, required
-	// only if .spec.infrastructure.vpc is omitted
+	// Required only if `.spec.infrastructure.vpc` is omitted. This value defines the
+	// ID of the VPC where the EKS cluster and its related resources will be created.
 	VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"`
 
-	// Overrides the default IAM role name prefix for the EKS workers
+	// Overrides the default prefix for the IAM role name of the EKS workers. If not
+	// set, a name will be generated from the cluster name.
 	WorkersIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"workersIAMRoleNamePrefixOverride,omitempty" yaml:"workersIAMRoleNamePrefixOverride,omitempty" mapstructure:"workersIAMRoleNamePrefixOverride,omitempty"`
 }
 
 type SpecKubernetesAPIServer struct {
-	// This value defines if the API server will be accessible only from the private
-	// subnets
+	// This value defines if the Kubernetes API server will be accessible from the
+	// private subnets. Default is `true`.
 	PrivateAccess bool `json:"privateAccess" yaml:"privateAccess" mapstructure:"privateAccess"`
 
-	// This value defines the CIDRs that will be allowed to access the API server from
-	// the private subnets
+	// The network CIDRs from the private subnets that will be allowed to access the
	// Kubernetes API server.
 	PrivateAccessCidrs []TypesCidr `json:"privateAccessCidrs,omitempty" yaml:"privateAccessCidrs,omitempty" mapstructure:"privateAccessCidrs,omitempty"`
 
-	// This value defines if the API server will be accessible from the public subnets
+	// This value defines if the Kubernetes API server will be accessible from the
+	// public subnets. Default is `false`.
 	PublicAccess bool `json:"publicAccess" yaml:"publicAccess" mapstructure:"publicAccess"`
 
-	// This value defines the CIDRs that will be allowed to access the API server from
-	// the public subnets
+	// The network CIDRs from the public subnets that will be allowed to access the
+	// Kubernetes API server.
 	PublicAccessCidrs []TypesCidr `json:"publicAccessCidrs,omitempty" yaml:"publicAccessCidrs,omitempty" mapstructure:"publicAccessCidrs,omitempty"`
 }
 
+// Optional additional security configuration for EKS IAM via the `aws-auth`
+// configmap.
+// +// Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html type SpecKubernetesAwsAuth struct { // This optional array defines additional AWS accounts that will be added to the - // aws-auth configmap + // `aws-auth` configmap. AdditionalAccounts []string `json:"additionalAccounts,omitempty" yaml:"additionalAccounts,omitempty" mapstructure:"additionalAccounts,omitempty"` // This optional array defines additional IAM roles that will be added to the - // aws-auth configmap + // `aws-auth` configmap. Roles []SpecKubernetesAwsAuthRole `json:"roles,omitempty" yaml:"roles,omitempty" mapstructure:"roles,omitempty"` // This optional array defines additional IAM users that will be added to the - // aws-auth configmap + // `aws-auth` configmap. Users []SpecKubernetesAwsAuthUser `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` } @@ -1520,6 +1661,8 @@ type SpecKubernetesAwsAuthUser struct { Username string `json:"username" yaml:"username" mapstructure:"username"` } +type SpecKubernetesLogRetentionDays int + type SpecKubernetesLogsTypesElem string const ( @@ -1530,6 +1673,8 @@ const ( SpecKubernetesLogsTypesElemScheduler SpecKubernetesLogsTypesElem = "scheduler" ) +// Array with all the node pool definitions that will join the cluster. Each item +// is an object. type SpecKubernetesNodePool struct { // AdditionalFirewallRules corresponds to the JSON schema field // "additionalFirewallRules". @@ -1539,31 +1684,32 @@ type SpecKubernetesNodePool struct { Ami *SpecKubernetesNodePoolAmi `json:"ami,omitempty" yaml:"ami,omitempty" mapstructure:"ami,omitempty"` // This optional array defines additional target groups to attach to the instances - // in the node pool + // in the node pool. AttachedTargetGroups []TypesAwsArn `json:"attachedTargetGroups,omitempty" yaml:"attachedTargetGroups,omitempty" mapstructure:"attachedTargetGroups,omitempty"` - // The container runtime to use for the nodes + // The container runtime to use in the nodes of the node pool. Default is + // `containerd`. ContainerRuntime *SpecKubernetesNodePoolContainerRuntime `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty" mapstructure:"containerRuntime,omitempty"` // Instance corresponds to the JSON schema field "instance". Instance SpecKubernetesNodePoolInstance `json:"instance" yaml:"instance" mapstructure:"instance"` - // Kubernetes labels that will be added to the nodes + // Kubernetes labels that will be added to the nodes. Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` - // The name of the node pool + // The name of the node pool. Name string `json:"name" yaml:"name" mapstructure:"name"` // Size corresponds to the JSON schema field "size". Size SpecKubernetesNodePoolSize `json:"size" yaml:"size" mapstructure:"size"` - // This value defines the subnet IDs where the nodes will be created + // Optional list of subnet IDs where to create the nodes. SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` - // AWS tags that will be added to the ASG and EC2 instances + // AWS tags that will be added to the ASG and EC2 instances. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // Kubernetes taints that will be added to the nodes + // Kubernetes taints that will be added to the nodes. 
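+	// Example (illustrative value): `["node.kubernetes.io/role=infra:NoSchedule"]`.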
Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` // Type corresponds to the JSON schema field "type". @@ -1583,10 +1729,11 @@ type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { // Protocol corresponds to the JSON schema field "protocol". Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - // Tags corresponds to the JSON schema field "tags". + // Additional AWS tags for the Firewall rule. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // Type corresponds to the JSON schema field "type". + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1597,6 +1744,7 @@ const ( SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress" ) +// Port range for the Firewall Rule. type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { // From corresponds to the JSON schema field "from". From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"` @@ -1606,22 +1754,23 @@ type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { } type SpecKubernetesNodePoolAdditionalFirewallRuleSelf struct { - // The name of the FW rule + // The name of the Firewall rule. Name string `json:"name" yaml:"name" mapstructure:"name"` // Ports corresponds to the JSON schema field "ports". Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - // The protocol of the FW rule + // The protocol of the Firewall rule. Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - // If true, the source will be the security group itself + // If `true`, the source will be the security group itself. Self bool `json:"self" yaml:"self" mapstructure:"self"` - // The tags of the FW rule + // Additional AWS tags for the Firewall rule. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // The type of the FW rule can be ingress or egress + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. Type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1633,22 +1782,23 @@ const ( ) type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct { - // The name of the FW rule + // The name for the additional Firewall rule Security Group. Name string `json:"name" yaml:"name" mapstructure:"name"` // Ports corresponds to the JSON schema field "ports". Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - // The protocol of the FW rule + // The protocol of the Firewall rule. Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - // The source security group ID + // The source security group ID. SourceSecurityGroupId string `json:"sourceSecurityGroupId" yaml:"sourceSecurityGroupId" mapstructure:"sourceSecurityGroupId"` - // The tags of the FW rule + // Additional AWS tags for the Firewall rule. 
Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // The type of the FW rule can be ingress or egress + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. Type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1659,9 +1809,11 @@ const ( SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress" ) +// Optional additional firewall rules that will be attached to the nodes. type SpecKubernetesNodePoolAdditionalFirewallRules struct { - // The CIDR blocks for the FW rule. At the moment the first item of the list will - // be used, others will be ignored. + // The CIDR blocks objects definition for the Firewall rule. Even though it is a + // list, only one item is currently supported. See + // https://github.com/sighupio/fury-eks-installer/issues/46 for more details. CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"` // Self corresponds to the JSON schema field "self". @@ -1673,10 +1825,10 @@ type SpecKubernetesNodePoolAdditionalFirewallRules struct { } type SpecKubernetesNodePoolAmi struct { - // The AMI ID to use for the nodes + // Optional. Custom AMI ID to use for the nodes. Id string `json:"id" yaml:"id" mapstructure:"id"` - // The owner of the AMI + // Optional. The owner of the custom AMI. Owner string `json:"owner" yaml:"owner" mapstructure:"owner"` } @@ -1687,20 +1839,25 @@ const ( SpecKubernetesNodePoolContainerRuntimeDocker SpecKubernetesNodePoolContainerRuntime = "docker" ) +// Configuration for the instances that will be used in the node pool. type SpecKubernetesNodePoolInstance struct { - // MaxPods corresponds to the JSON schema field "maxPods". + // Set the maximum pods per node to a custom value. If not set will use EKS + // default value that depends on the instance type. + // + // Ref: + // https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"` - // If true, the nodes will be created as spot instances + // If `true`, the nodes will be created as spot instances. Default is `false`. Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"` - // The instance type to use for the nodes + // The instance type to use for the nodes. Type string `json:"type" yaml:"type" mapstructure:"type"` - // The size of the disk in GB + // The size of the disk in GB. VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"` - // VolumeType corresponds to the JSON schema field "volumeType". + // Volume type for the instance disk. Default is `gp2`. VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"` } @@ -1714,10 +1871,10 @@ const ( ) type SpecKubernetesNodePoolSize struct { - // The maximum number of nodes in the node pool + // The maximum number of nodes in the node pool. Max int `json:"max" yaml:"max" mapstructure:"max"` - // The minimum number of nodes in the node pool + // The minimum number of nodes in the node pool. 
Min int `json:"min" yaml:"min" mapstructure:"min"` } @@ -1756,6 +1913,10 @@ type SpecPluginsHelmReleases []struct { // The chart of the release Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. + DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + // The name of the release Name string `json:"name" yaml:"name" mapstructure:"name"` @@ -1806,24 +1967,26 @@ type SpecToolsConfigurationTerraform struct { State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"` } +// Configuration for storing the Terraform state of the cluster. type SpecToolsConfigurationTerraformState struct { // S3 corresponds to the JSON schema field "s3". S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"` } +// Configuration for the S3 bucket used to store the Terraform state. type SpecToolsConfigurationTerraformStateS3 struct { - // This value defines which bucket will be used to store all the states + // This value defines which bucket will be used to store all the states. BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` // This value defines which folder will be used to store all the states inside the - // bucket + // bucket. KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"` - // This value defines in which region the bucket is located + // This value defines in which region the bucket is located. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` // This value defines if the region of the bucket should be validated or not by - // Terraform, useful when using a bucket in a recently added region + // Terraform, useful when using a bucket in a recently added region. 
SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"` } @@ -1835,2375 +1998,2483 @@ type TypesAwsIamRoleNamePrefix string type TypesAwsIpProtocol string -type TypesAwsRegion string - -const ( - TypesAwsRegionAfSouth1 TypesAwsRegion = "af-south-1" - TypesAwsRegionApEast1 TypesAwsRegion = "ap-east-1" - TypesAwsRegionApNortheast1 TypesAwsRegion = "ap-northeast-1" - TypesAwsRegionApNortheast2 TypesAwsRegion = "ap-northeast-2" - TypesAwsRegionApNortheast3 TypesAwsRegion = "ap-northeast-3" - TypesAwsRegionApSouth1 TypesAwsRegion = "ap-south-1" - TypesAwsRegionApSouth2 TypesAwsRegion = "ap-south-2" - TypesAwsRegionApSoutheast1 TypesAwsRegion = "ap-southeast-1" - TypesAwsRegionApSoutheast2 TypesAwsRegion = "ap-southeast-2" - TypesAwsRegionApSoutheast3 TypesAwsRegion = "ap-southeast-3" - TypesAwsRegionApSoutheast4 TypesAwsRegion = "ap-southeast-4" - TypesAwsRegionCaCentral1 TypesAwsRegion = "ca-central-1" - TypesAwsRegionEuCentral1 TypesAwsRegion = "eu-central-1" - TypesAwsRegionEuCentral2 TypesAwsRegion = "eu-central-2" - TypesAwsRegionEuNorth1 TypesAwsRegion = "eu-north-1" - TypesAwsRegionEuSouth1 TypesAwsRegion = "eu-south-1" - TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2" - TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" - TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" - TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" - TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" - TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" - TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" - TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" - TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" - TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" - TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" - TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" - TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" -) +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) + return nil +} -type TypesAwsS3BucketName string +var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ + "none", +} -type TypesAwsS3BucketNamePrefix string +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesNetworkingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) + } + *j = SpecDistributionModulesNetworkingType(v) + return nil +} -type TypesAwsS3KeyPrefix string +const ( + TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" + TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" + TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" +) -type TypesAwsSshPubKey string +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", +} -type TypesAwsSubnetId string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + } + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + return nil +} -type TypesAwsTags map[string]string +const ( + TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" + TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" + TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" + TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" +) -type TypesAwsVpcId string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + } + type Plain SpecDistributionModulesPolicyGatekeeper + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyGatekeeper(plain) + return nil +} -type TypesCidr string +const TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" -type TypesEnvRef string +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", +} -type TypesFileRef string +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + } + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + return nil +} -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` +const ( + TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" + TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" + TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2" +) - // The tolerations that will be added to the pods for the cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") + } + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + } + type Plain SpecDistributionModulesPolicyKyverno + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyKyverno(plain) + return nil } -type TypesFuryModuleComponentOverridesWithIAMRoleName struct { - // IamRoleName corresponds to the JSON schema field "iamRoleName". - IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` +const TypesAwsRegionEuSouth1 TypesAwsRegion = "eu-south-1" - // The node selector to use to place the pods for the load balancer controller - // module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", +} - // The tolerations that will be added to the pods for the cluster autoscaler - // module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + } + *j = SpecDistributionModulesPolicyType(v) + return nil } -type TypesFuryModuleComponentOverrides_1 struct { - // NodeSelector corresponds to the JSON schema field "nodeSelector". - NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` +const ( + TypesAwsRegionEuNorth1 TypesAwsRegion = "eu-north-1" + TypesAwsRegionEuCentral2 TypesAwsRegion = "eu-central-2" + TypesAwsRegionEuCentral1 TypesAwsRegion = "eu-central-1" + TypesAwsRegionCaCentral1 TypesAwsRegion = "ca-central-1" +) - // Tolerations corresponds to the JSON schema field "tolerations". - Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + } + type Plain SpecDistributionModulesPolicy + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicy(plain) + return nil } -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". 
- Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the dr module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` +const ( + TypesAwsRegionApSoutheast4 TypesAwsRegion = "ap-southeast-4" + TypesAwsRegionApSoutheast3 TypesAwsRegion = "ap-southeast-3" + TypesAwsRegionApSoutheast2 TypesAwsRegion = "ap-southeast-2" +) - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", } -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - - // The host of the ingress - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` -} - -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress - -type TypesIpAddress string - -type TypesKubeLabels map[string]string - -type TypesKubeLabels_1 map[string]string - -type TypesKubeNodeSelector map[string]string - -type TypesKubeNodeSelector_1 map[string]string - -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` -} - -type TypesKubeResourcesLimits struct { - // The cpu limit for the opensearch pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} - -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} - -type TypesKubeTaints []string - -type TypesKubeToleration struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` - - // The key of the toleration - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". - Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // The value of the toleration - Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + } + *j = SpecDistributionModulesTracingTempoBackend(v) + return nil } -type TypesKubeTolerationEffect string - -const ( - TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" -) - -type TypesKubeTolerationEffect_1 string - -const ( - TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" - TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" - TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" -) - -type TypesKubeTolerationOperator string - -const ( - TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" - TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" -) - -type TypesKubeTolerationOperator_1 string - const ( - TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" - TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" + TypesAwsRegionApSoutheast1 TypesAwsRegion = "ap-southeast-1" + TypesAwsRegionApSouth2 TypesAwsRegion = "ap-south-2" + TypesAwsRegionApSouth1 TypesAwsRegion = "ap-south-1" + TypesAwsRegionApNortheast3 TypesAwsRegion = "ap-northeast-3" + TypesAwsRegionApNortheast2 TypesAwsRegion = "ap-northeast-2" ) -type TypesKubeToleration_1 struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` - - // Key corresponds to the JSON schema field "key". - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". - Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // Value corresponds to the JSON schema field "value". 
- Value string `json:"value" yaml:"value" mapstructure:"value"` -} - -type TypesSemVer string - -type TypesSshPubKey string - -type TypesTcpPort int - -type TypesUri string - -var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ - "EKSCluster", -} - -var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", -} - -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "eks", -} - -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "dns01", - "http01", -} - -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", -} - -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", -} - -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", -} - -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", -} - -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", -} - -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", -} - -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", -} - -var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ - "none", -} - -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", -} - -var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ - "Audit", - "Enforce", -} - -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ +var enumValues_SpecDistributionModulesTracingType = []interface{}{ "none", - "gatekeeper", - "kyverno", -} - -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ - "ingress", - "egress", + "tempo", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { + for _, expected := range enumValues_SpecDistributionModulesTracingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) + *j = SpecDistributionModulesTracingType(v) return nil } +const ( + TypesAwsRegionApNortheast1 TypesAwsRegion = "ap-northeast-1" + TypesAwsRegionApEast1 TypesAwsRegion = "ap-east-1" + TypesAwsRegionAfSouth1 TypesAwsRegion = "af-south-1" +) + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource + type Plain SpecDistributionModulesTracing var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + *j = SpecDistributionModulesTracing(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + for _, expected := range enumValues_TypesAwsRegion { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + *j = TypesAwsRegion(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidrBlocks"]; !ok || v == nil { - return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock + type Plain SpecDistributionModules var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", 
"cidrBlocks", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) + *j = SpecDistributionModules(plain) return nil } +var enumValues_TypesAwsRegion = []interface{}{ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-south-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-4", + "ca-central-1", + "eu-central-1", + "eu-central-2", + "eu-north-1", + "eu-south-1", + "eu-south-2", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-central-1", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { +func (j *SpecDistribution) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + type Plain SpecDistribution + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + *j = SpecDistribution(plain) + return nil +} + +type TypesCidr string + +type TypesAwsRegion string + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err } - if v, ok := raw["self"]; !ok || v == nil { - return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf + type Plain SpecInfrastructureVpcNetworkSubnetsCidrs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) + *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) return nil } +type TypesAwsS3BucketName string + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + if v, ok := raw["cidr"]; !ok || v == nil { + return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") } - type Plain SpecDistributionModulesAuthDex + if v, ok := raw["subnetsCidrs"]; !ok || v == nil { + return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + } + type Plain SpecInfrastructureVpcNetwork var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthDex(plain) + *j = SpecInfrastructureVpcNetwork(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ - "ingress", - "egress", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { + for _, expected := range enumValues_SpecDistributionModulesDrType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) + *j = SpecDistributionModulesDrType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["network"]; !ok || v == nil { + return fmt.Errorf("field network in SpecInfrastructureVpc: required") } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + type Plain SpecInfrastructureVpc + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - type Plain SpecDistributionModulesAuthOverridesIngress + *j = SpecInfrastructureVpc(plain) + return nil +} + +type TypesAwsS3BucketNamePrefix string + +type TypesTcpPort int + +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "eks", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { + return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") + } + if v, ok := raw["githubUsersName"]; !ok || v == nil { + return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") + } + type Plain SpecInfrastructureVpnSsh var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthOverridesIngress(plain) + if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + } + *j = SpecInfrastructureVpnSsh(plain) + return nil +} + +type TypesAwsVpcId string + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAws) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterAutoscaler"]; !ok || v == nil { + return fmt.Errorf("field clusterAutoscaler in SpecDistributionModulesAws: required") + } + if v, ok := raw["ebsCsiDriver"]; !ok || v == nil { + return fmt.Errorf("field ebsCsiDriver in SpecDistributionModulesAws: required") + } + if v, ok := raw["loadBalancerController"]; !ok || v == nil { + return fmt.Errorf("field loadBalancerController in SpecDistributionModulesAws: required") + } + if v, ok := raw["overrides"]; !ok || v == nil { + return fmt.Errorf("field overrides in SpecDistributionModulesAws: required") + } + type Plain SpecDistributionModulesAws + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAws(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ssh"]; !ok || v == nil { + return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") + } + if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { + return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + } + type Plain SpecInfrastructureVpn + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpn(plain) + return nil +} + +// Override the common configuration with a particular configuration for the +// module. +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // Set to override the node selector used to place the pods of the module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the module. 
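+	//
+	// Example (illustrative values):
+	//
+	//	tolerations:
+	//	  - key: node.kubernetes.io/role
+	//	    operator: Equal
+	//	    value: infra
+	//	    effect: NoSchedule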
+ Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["privateAccess"]; !ok || v == nil { + return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) + if v, ok := raw["publicAccess"]; !ok || v == nil { + return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) + type Plain SpecKubernetesAPIServer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesAPIServer(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ - "ingress", - "egress", +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication even if + // `.spec.modules.auth.provider.type` is SSO or Basic Auth. + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // Use this host for the ingress instead of the default one. + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // Use this ingress class for the ingress instead of the default one. + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") } - if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { - return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["rolearn"]; !ok || v == nil { + return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId + type Plain SpecKubernetesAwsAuthRole var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) + *j = SpecKubernetesAwsAuthRole(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAwsLoadBalancerController) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsLoadBalancerController: required") } - type Plain SpecDistributionModulesAuthProviderBasicAuth + type Plain SpecDistributionModulesAwsLoadBalancerController var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + *j = SpecDistributionModulesAwsLoadBalancerController(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - type Plain SpecKubernetesNodePoolAdditionalFirewallRules - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + if v, ok := raw["userarn"]; !ok || v == nil { + return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") } - if plain.Self != nil && len(plain.Self) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "self", 1) + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") } - if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) + type Plain SpecKubernetesAwsAuthUser + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + *j = SpecKubernetesAwsAuthUser(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAwsEbsCsiDriver) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["from"]; !ok || v == nil { - return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") - } - if v, ok := raw["to"]; !ok || v == nil { - return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsEbsCsiDriver: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts + type Plain SpecDistributionModulesAwsEbsCsiDriver var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) + *j = SpecDistributionModulesAwsEbsCsiDriver(plain) return nil } +const TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAwsClusterAutoscaler) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["id"]; !ok || v == nil { - return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required") - } - if v, ok := raw["owner"]; !ok || v == nil { - return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required") + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsClusterAutoscaler: required") } - type Plain SpecKubernetesNodePoolAmi + type Plain SpecDistributionModulesAwsClusterAutoscaler var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAmi(plain) + *j = SpecDistributionModulesAwsClusterAutoscaler(plain) return nil } +var enumValues_SpecKubernetesLogRetentionDays = []interface{}{ + 0, + 1, + 3, + 5, + 7, + 14, + 30, + 60, + 90, + 120, + 150, + 180, + 365, + 400, + 545, + 731, + 1096, + 1827, + 2192, + 2557, + 2922, + 3288, + 3653, +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { - var v string +func (j *SpecKubernetesLogRetentionDays) UnmarshalJSON(b []byte) error { + var v int if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + for _, expected := range enumValues_SpecKubernetesLogRetentionDays { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogRetentionDays, v) } - *j = SpecDistributionModulesAuthProviderType(v) + *j = SpecKubernetesLogRetentionDays(v) return nil } -var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ - "docker", - "containerd", +type TypesFuryModuleComponentOverridesWithIAMRoleName struct { + // IamRoleName corresponds to the JSON schema field "iamRoleName". + IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` + + // The node selector to use to place the pods for the load balancer controller + // module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cluster autoscaler + // module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { + for _, expected := range enumValues_SpecKubernetesLogsTypesElem { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) } - *j = SpecKubernetesNodePoolContainerRuntime(v) + *j = SpecKubernetesLogsTypesElem(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") } - type Plain SpecDistributionModulesAuthProvider + type Plain SpecDistributionModulesMonitoring var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProvider(plain) + *j = SpecDistributionModulesMonitoring(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") - } - type Plain SpecDistributionModulesAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuth(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") } - var ok bool - for _, expected := range enumValues_SpecKubernetesLogsTypesElem { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesDrVeleroEks: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") } - *j = SpecKubernetesLogsTypesElem(v) - return nil -} - -var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ - "gp2", - "gp3", - "io1", - "standard", -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { + type Plain SpecDistributionModulesDrVeleroEks + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) - } - *j = SpecKubernetesNodePoolInstanceVolumeType(v) + *j = SpecDistributionModulesDrVeleroEks(plain) return nil } -var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ - "api", - "audit", - "authenticator", - "controllerManager", - "scheduler", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAwsClusterAutoscaler) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsClusterAutoscaler: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") } - type Plain SpecDistributionModulesAwsClusterAutoscaler + type Plain SpecDistributionModulesAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAwsClusterAutoscaler(plain) + *j = SpecDistributionModulesAuth(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") - } - if v, ok := raw["userarn"]; !ok || v == nil { - return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") } - type Plain SpecKubernetesAwsAuthUser + type Plain SpecDistributionModulesAuthProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthUser(plain) + *j = SpecDistributionModulesAuthProvider(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAwsEbsCsiDriver) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsEbsCsiDriver: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesAwsEbsCsiDriver - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = SpecDistributionModulesAwsEbsCsiDriver(plain) + *j = SpecDistributionModulesAuthProviderType(v) return nil } +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["rolearn"]; !ok || v == nil { - return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") + if v, ok := raw["from"]; !ok || v == nil { + return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + if v, ok := raw["to"]; !ok || v == nil { + return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - type Plain SpecKubernetesAwsAuthRole + type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthRole(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - type Plain SpecKubernetesNodePoolInstance + type Plain SpecDistributionModulesLoggingOpensearch var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolInstance(plain) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } +type TypesAwsTags map[string]string + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAwsLoadBalancerController) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsLoadBalancerController: required") + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") } - type Plain SpecDistributionModulesAwsLoadBalancerController + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAwsLoadBalancerController(plain) + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) return nil } +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ + "ingress", + "egress", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["privateAccess"]; !ok || v == nil { - return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") - } - if v, ok := raw["publicAccess"]; !ok || v == nil { - return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecKubernetesAPIServer - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) } - *j = SpecKubernetesAPIServer(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["max"]; !ok || v == nil { - return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") } - if v, ok := raw["min"]; !ok || v == nil { - return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") } - type Plain SpecKubernetesNodePoolSize + type Plain SpecDistributionModulesAuthOverridesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolSize(plain) + *j = SpecDistributionModulesAuthOverridesIngress(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ssh"]; !ok || v == nil { - return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") - } - if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { - return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") } - type Plain SpecInfrastructureVpn + type Plain SpecDistributionModulesAuthDex var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpn(plain) + *j = SpecDistributionModulesAuthDex(plain) return nil } +type TypesFuryModuleComponentOverrides struct { + // Set to override the node selector used to place the pods of the package. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the package. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { - return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") + if v, ok := raw["cidrBlocks"]; !ok || v == nil { + return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - if v, ok := raw["githubUsersName"]; !ok || v == nil { - return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - type Plain SpecInfrastructureVpnSsh + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) } - *j = SpecInfrastructureVpnSsh(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAws) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["clusterAutoscaler"]; !ok || v == nil { - return fmt.Errorf("field clusterAutoscaler in SpecDistributionModulesAws: required") - } - if v, ok := raw["ebsCsiDriver"]; !ok || v == nil { - return fmt.Errorf("field ebsCsiDriver in SpecDistributionModulesAws: required") - } - if v, ok := raw["loadBalancerController"]; !ok || v == nil { - return fmt.Errorf("field loadBalancerController in SpecDistributionModulesAws: required") - } - if v, ok := raw["overrides"]; !ok || v == nil { - return fmt.Errorf("field overrides in SpecDistributionModulesAws: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") } - type Plain SpecDistributionModulesAws + type Plain SpecDistributionCustomPatchesSecretGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAws(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) return nil } -var enumValues_SpecKubernetesNodePoolType = []interface{}{ - "eks-managed", - "self-managed", +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ + "ingress", + "egress", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolType { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) } - *j = SpecKubernetesNodePoolType(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["network"]; !ok || v == nil { - return fmt.Errorf("field network in SpecInfrastructureVpc: required") + if v, ok := raw["eks"]; !ok || v == nil { + return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") } - type Plain SpecInfrastructureVpc + type Plain SpecDistributionModulesDrVelero var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpc(plain) + *j = SpecDistributionModulesDrVelero(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + return nil +} + +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidr"]; !ok || v == nil { - return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - if v, ok := raw["subnetsCidrs"]; !ok || v == nil { - return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - type Plain SpecInfrastructureVpcNetwork + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["self"]; !ok || v == nil { + return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpcNetwork(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + type Plain SpecDistributionModulesDr var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + *j = SpecDistributionModulesDr(plain) return nil } +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ + "ingress", + "egress", +} + +const TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressClusterIssuerRoute53) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["instance"]; !ok || v == nil { - return fmt.Errorf("field instance in SpecKubernetesNodePool: required") + if v, ok := raw["hostedZoneId"]; !ok || v == nil { + return fmt.Errorf("field hostedZoneId in SpecDistributionModulesIngressClusterIssuerRoute53: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePool: required") + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesIngressClusterIssuerRoute53: required") } - if v, ok := raw["size"]; !ok || v == nil { - return fmt.Errorf("field size in SpecKubernetesNodePool: required") + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesIngressClusterIssuerRoute53: required") } - type Plain SpecKubernetesNodePool + type Plain SpecDistributionModulesIngressClusterIssuerRoute53 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePool(plain) + *j = SpecDistributionModulesIngressClusterIssuerRoute53(plain) return nil } +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "dns01", + "http01", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) } - *j = SpecDistributionModulesDrType(v) + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) return nil } -var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ - "launch_configurations", - "launch_templates", - "both", +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { + return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) } - *j = SpecKubernetesNodePoolsLaunchKind(v) + *j = SpecDistributionModulesMonitoringType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["private"]; !ok || v == nil { - return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") - } - if v, ok := raw["public"]; !ok || v == nil { - return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") - } - type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + type Plain SpecKubernetesNodePoolAdditionalFirewallRules var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + if len(plain.CidrBlocks) > 1 { + return fmt.Errorf("field %s length: must be <= %d", "cidrBlocks", 1) + } + if plain.Self != nil && len(plain.Self) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "self", 1) + } + if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) return nil } +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistribution) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["modules"]; !ok || v == nil { - return fmt.Errorf("field modules in SpecDistribution: required") + if v, ok := raw["id"]; !ok || v == nil { + return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required") } - type Plain SpecDistribution + if v, ok := raw["owner"]; !ok || v == nil { + return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required") + } + type Plain SpecKubernetesNodePoolAmi var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistribution(plain) + *j = SpecKubernetesNodePoolAmi(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") - } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") + if v, ok := raw["route53"]; !ok || v == nil { + return fmt.Errorf("field route53 in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - type Plain SpecDistributionModules + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModules(plain) + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) return nil } -var enumValues_TypesAwsRegion = []interface{}{ - "af-south-1", - "ap-east-1", - "ap-northeast-1", - "ap-northeast-2", - "ap-northeast-3", - "ap-south-1", - "ap-south-2", - "ap-southeast-1", - "ap-southeast-2", - "ap-southeast-3", - "ap-southeast-4", - "ca-central-1", - "eu-central-1", - "eu-central-2", - "eu-north-1", - "eu-south-1", - "eu-south-2", - "eu-west-1", - "eu-west-2", - "eu-west-3", - "me-central-1", - "me-south-1", - "sa-east-1", - "us-east-1", - "us-east-2", - "us-gov-east-1", - "us-gov-west-1", - "us-west-1", - "us-west-2", +var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ + "docker", + "containerd", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["apiServer"]; !ok || v == nil { - return fmt.Errorf("field apiServer in SpecKubernetes: required") - } - if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { - return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") - } - if v, ok := raw["nodePools"]; !ok || v == nil { - return fmt.Errorf("field nodePools in SpecKubernetes: required") - } - if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { - return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecKubernetes - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) } - *j = SpecKubernetes(plain) + *j = SpecKubernetesNodePoolContainerRuntime(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") } - type Plain SpecDistributionModulesTracing + type Plain SpecDistributionModulesIngressCertManager var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesTracing(plain) + *j = SpecDistributionModulesIngressCertManager(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") + } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") + if v, ok := raw["vpcId"]; !ok || v == nil { + return fmt.Errorf("field vpcId in SpecDistributionModulesIngressDNSPrivate: required") } - type Plain SpecPluginsHelmReleasesElemSetElem + type Plain SpecDistributionModulesIngressDNSPrivate var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecPluginsHelmReleasesElemSetElem(plain) + *j = SpecDistributionModulesIngressDNSPrivate(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesAwsRegion { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + type Plain SpecDistributionModulesIngressDNSPublic + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = TypesAwsRegion(v) + *j = SpecDistributionModulesIngressDNSPublic(plain) return nil } +var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ + "gp2", + "gp3", + "io1", + "standard", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingType { + for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) } - *j = SpecDistributionModulesTracingType(v) + *j = SpecKubernetesNodePoolInstanceVolumeType(v) return nil } -var enumValues_SpecDistributionModulesTracingType = []interface{}{ - "none", - "tempo", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesIngressDNS) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecDistributionModulesIngressDNS: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecDistributionModulesIngressDNS: required") } - *j = SpecDistributionModulesTracingTempoBackend(v) + type Plain SpecDistributionModulesIngressDNS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressDNS(plain) return nil } -var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ - "minio", - "externalEndpoint", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressExternalDNS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + if v, ok := raw["privateIamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field privateIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") } - type Plain SpecDistributionModulesPolicy + if v, ok := raw["publicIamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field publicIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") + } + type Plain SpecDistributionModulesIngressExternalDNS var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicy(plain) + *j = SpecDistributionModulesIngressExternalDNS(plain) return nil } +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyType { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) } - *j = SpecDistributionModulesPolicyType(v) + *j = SpecDistributionModulesIngressNginxTLSProvider(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") } - if v, ok := raw["keyPrefix"]; !ok || v == nil { - return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") } - type Plain SpecToolsConfigurationTerraformStateS3 + type Plain SpecDistributionModulesIngressNginxTLSSecret var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraformStateS3(plain) + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") } - type Plain SpecDistributionModulesLoggingOpensearch + type Plain SpecKubernetesNodePoolInstance var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingOpensearch(plain) + *j = SpecKubernetesNodePoolInstance(plain) return nil } +type TypesKubeLabels_1 map[string]string + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["s3"]; !ok || v == nil { - return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") } - type Plain SpecToolsConfigurationTerraformState + type Plain SpecDistributionModulesIngressNginxTLS var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraformState(plain) + *j = SpecDistributionModulesIngressNginxTLS(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") + if v, ok := raw["max"]; !ok || v == nil { + return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") } - if v, ok := raw["validationFailureAction"]; !ok || v == nil { - return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + if v, ok := raw["min"]; !ok || v == nil { + return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") } - type Plain SpecDistributionModulesPolicyKyverno + type Plain SpecKubernetesNodePoolSize var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyKyverno(plain) + *j = SpecKubernetesNodePoolSize(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["state"]; !ok || v == nil { - return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") - } - type Plain SpecToolsConfigurationTerraform - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecToolsConfigurationTerraform(plain) - return nil +type TypesAwsSubnetId string + +type TypesKubeTaints []string + +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", +} + +var enumValues_SpecKubernetesNodePoolType = []interface{}{ + "eks-managed", + "self-managed", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + for _, expected := range enumValues_SpecKubernetesNodePoolType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) - } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["terraform"]; !ok || v == nil { - return fmt.Errorf("field terraform in SpecToolsConfiguration: required") - } - type Plain SpecToolsConfiguration - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) } - *j = SpecToolsConfiguration(plain) + *j = SpecKubernetesNodePoolType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *Spec) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["distribution"]; !ok || v == nil { - return fmt.Errorf("field distribution in Spec: required") - } - if v, ok := raw["distributionVersion"]; !ok || v == nil { - return fmt.Errorf("field distributionVersion in Spec: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in Spec: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in Spec: required") - } - if v, ok := raw["toolsConfiguration"]; !ok || v == nil { - return fmt.Errorf("field toolsConfiguration in Spec: required") - } - type Plain Spec - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if len(plain.DistributionVersion) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) - } - *j = Spec(plain) - return nil +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") } - type Plain SpecDistributionModulesPolicyGatekeeper + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyGatekeeper(plain) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration: required") + if v, ok := raw["instance"]; !ok || v == nil { + return fmt.Errorf("field instance in SpecKubernetesNodePool: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePool: required") } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration: required") + if v, ok := raw["size"]; !ok || v == nil { + return fmt.Errorf("field size in SpecKubernetesNodePool: required") } - type Plain TypesKubeToleration + type Plain SpecKubernetesNodePool var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration(plain) + *j = SpecKubernetesNodePool(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + *j = SpecDistributionModulesIngressNginxType(v) return nil } +var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ + "launch_configurations", + "launch_templates", + "both", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesNetworkingType { + for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) } - *j = SpecDistributionModulesNetworkingType(v) + *j = SpecKubernetesNodePoolsLaunchKind(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") } - type Plain SpecDistributionModulesMonitoring + type Plain SpecDistributionModulesIngressNginx var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesMonitoring(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) - } - *j = TypesKubeTolerationOperator(v) + *j = SpecDistributionModulesIngressNginx(plain) return nil } -var enumValues_TypesKubeTolerationOperator = []interface{}{ - "Exists", - "Equal", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["certManager"]; !ok || v == nil { + return fmt.Errorf("field certManager in SpecDistributionModulesIngress: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["dns"]; !ok || v == nil { + return fmt.Errorf("field dns in SpecDistributionModulesIngress: required") } - type Plain SpecDistributionModulesDrVeleroEks + if v, ok := raw["externalDns"]; !ok || v == nil { + return fmt.Errorf("field externalDns in SpecDistributionModulesIngress: required") + } + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + } + type Plain SpecDistributionModulesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVeleroEks(plain) + *j = SpecDistributionModulesIngress(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["eks"]; !ok || v == nil { - return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") } - type Plain SpecDistributionModulesDrVelero + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVelero(plain) + *j = SpecDistributionModulesLoggingCustomOutputs(plain) return nil } +type TypesKubeLabels map[string]string + // 
UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") + if v, ok := raw["apiServer"]; !ok || v == nil { + return fmt.Errorf("field apiServer in SpecKubernetes: required") } - type Plain SpecDistributionModulesDr + if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { + return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") + } + if v, ok := raw["nodePools"]; !ok || v == nil { + return fmt.Errorf("field nodePools in SpecKubernetes: required") + } + if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { + return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") + } + type Plain SpecKubernetes var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDr(plain) + *j = SpecKubernetes(plain) return nil } +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressClusterIssuerRoute53) UnmarshalJSON(b []byte) error { +func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["hostedZoneId"]; !ok || v == nil { - return fmt.Errorf("field hostedZoneId in SpecDistributionModulesIngressClusterIssuerRoute53: required") - } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesIngressClusterIssuerRoute53: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesIngressClusterIssuerRoute53: required") + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") } - type Plain SpecDistributionModulesIngressClusterIssuerRoute53 + type Plain SpecPluginsHelmReleasesElemSetElem var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressClusterIssuerRoute53(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) - } - *j = TypesKubeTolerationEffect(v) + *j = SpecPluginsHelmReleasesElemSetElem(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { - return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { - return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { - return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { - return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") } - type Plain SpecDistributionModulesAuthPomeriumSecrets + type Plain SpecDistributionModulesLogging var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomeriumSecrets(plain) + *j = SpecDistributionModulesLogging(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) } - *j = SpecDistributionModulesMonitoringType(v) + *j = SpecDistributionModulesLoggingLokiBackend(v) return nil } +type TypesKubeResourcesLimits struct { + // The CPU limit for the Pod. Example: `1000m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the Pod. Example: `1G`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResourcesRequests struct { + // The CPU request for the Pod, in cores. Example: `500m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for the Pod. Example: `500M`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +} + +type TypesAwsS3KeyPrefix string + +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") + } + if v, ok := raw["keyPrefix"]; !ok || v == nil { + return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") + } + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + type Plain SpecToolsConfigurationTerraformStateS3 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = SpecToolsConfigurationTerraformStateS3(plain) return nil } -var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect_1 { + for _, expected := range enumValues_SpecDistributionModulesLoggingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = TypesKubeTolerationEffect_1(v) + *j = SpecDistributionModulesLoggingType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } - if v, ok := raw["route53"]; !ok || v == nil { - return fmt.Errorf("field route53 in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["s3"]; !ok || v == nil { + return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + type Plain SpecToolsConfigurationTerraformState var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + *j = SpecToolsConfigurationTerraformState(plain) return nil } +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + if v, ok := raw["state"]; !ok || v == nil { + return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") } - type Plain SpecDistributionModulesIngressCertManager + type Plain SpecToolsConfigurationTerraform var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManager(plain) + *j = SpecToolsConfigurationTerraform(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") - } - if v, ok := raw["vpcId"]; !ok || v == nil { - return fmt.Errorf("field vpcId in SpecDistributionModulesIngressDNSPrivate: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngressDNSPrivate - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) } - *j = SpecDistributionModulesIngressDNSPrivate(plain) + *j = SpecDistributionModulesLoggingOpensearchType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + if v, ok := raw["terraform"]; !ok || v == nil { + return fmt.Errorf("field terraform in SpecToolsConfiguration: required") } - type Plain SpecDistributionModulesIngressDNSPublic + type Plain SpecToolsConfiguration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPublic(plain) + *j = SpecToolsConfiguration(plain) return nil } -var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ - "Exists", - "Equal", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator_1 { + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) } - *j = TypesKubeTolerationOperator_1(v) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNS) UnmarshalJSON(b []byte) error { +func (j *Spec) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["private"]; !ok || v == nil { - return fmt.Errorf("field private in SpecDistributionModulesIngressDNS: required") + if v, ok := raw["distribution"]; !ok || v == nil { + return fmt.Errorf("field distribution in Spec: required") } - if v, ok := raw["public"]; !ok || v == nil { - return fmt.Errorf("field public in SpecDistributionModulesIngressDNS: required") + if v, ok := raw["distributionVersion"]; !ok || v == nil { + return fmt.Errorf("field distributionVersion in Spec: required") } - type Plain SpecDistributionModulesIngressDNS + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in Spec: required") + } + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in Spec: required") + } + if v, ok := raw["toolsConfiguration"]; !ok || v == nil { + return fmt.Errorf("field toolsConfiguration in Spec: required") + } + type Plain Spec var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNS(plain) + if len(plain.DistributionVersion) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + } + *j = Spec(plain) return nil } +var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressExternalDNS) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["privateIamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field privateIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration: required") } - if v, ok := raw["publicIamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field publicIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration: required") } - type Plain SpecDistributionModulesIngressExternalDNS + type Plain TypesKubeToleration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressExternalDNS(plain) + *j = TypesKubeToleration(plain) return nil } +type TypesKubeToleration struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // The key of the toleration + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // The value of the toleration + Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` +} + +const ( + TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" + TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" +) + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + for _, expected := range enumValues_TypesKubeTolerationOperator { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) + *j = TypesKubeTolerationOperator(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration_1: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration_1: required") - } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in TypesKubeToleration_1: required") - } - type Plain TypesKubeToleration_1 - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = TypesKubeToleration_1(plain) - return nil +var enumValues_TypesKubeTolerationOperator = []interface{}{ + "Exists", + "Equal", } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") - } - type Plain SpecDistributionModulesIngressNginxTLSSecret - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) - return nil -} +type TypesKubeTolerationOperator string -var enumValues_TypesKubeTolerationEffect = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", -} +const ( + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" +) // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngressNginxTLS - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) } - *j = SpecDistributionModulesIngressNginxTLS(plain) + *j = TypesKubeTolerationEffect(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["secrets"]; !ok || v == nil { - return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") + if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { + return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - type Plain SpecDistributionModulesAuthPomerium_2 + if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { + return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { + return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { + return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + } + type Plain SpecDistributionModulesAuthPomeriumSecrets var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomerium_2(plain) + *j = SpecDistributionModulesAuthPomeriumSecrets(plain) return nil } +type TypesKubeNodeSelector_1 map[string]string + +type TypesKubeTolerationEffect_1 string + +var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + for _, expected := range enumValues_TypesKubeTolerationEffect_1 { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = TypesKubeTolerationEffect_1(v) return nil } +const ( + TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" + TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" + TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" +) + +type TypesKubeTolerationOperator_1 string + +var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ + "Exists", + "Equal", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + for _, expected := range enumValues_TypesKubeTolerationOperator_1 { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) } - *j = SpecDistributionModulesIngressNginxType(v) + *j = TypesKubeTolerationOperator_1(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") - } - type Plain SpecDistributionModulesIngressNginx - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressNginx(plain) - return nil +const ( + TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" + TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" +) + +type TypesKubeToleration_1 struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // Value corresponds to the JSON schema field "value". + Value string `json:"value" yaml:"value" mapstructure:"value"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["certManager"]; !ok || v == nil { - return fmt.Errorf("field certManager in SpecDistributionModulesIngress: required") - } - if v, ok := raw["dns"]; !ok || v == nil { - return fmt.Errorf("field dns in SpecDistributionModulesIngress: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration_1: required") } - if v, ok := raw["externalDns"]; !ok || v == nil { - return fmt.Errorf("field externalDns in SpecDistributionModulesIngress: required") + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration_1: required") } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeToleration_1: required") } - type Plain SpecDistributionModulesIngress + type Plain TypesKubeToleration_1 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngress(plain) + *j = TypesKubeToleration_1(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") - } - type Plain SpecDistributionModulesLoggingCustomOutputs - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) - return nil +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". + NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Tolerations corresponds to the JSON schema field "tolerations". 
+ Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", } +type TypesKubeTolerationEffect string + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + if v, ok := raw["secrets"]; !ok || v == nil { + return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") } - type Plain SpecDistributionModulesLogging + type Plain SpecDistributionModulesAuthPomerium_2 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLogging(plain) + *j = SpecDistributionModulesAuthPomerium_2(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) - } - *j = SpecDistributionModulesLoggingLokiBackend(v) - return nil -} +type TypesAwsSshPubKey string + +type TypesEnvRef string + +type TypesFileRef string + +type TypesIpAddress string + +type TypesSemVer string + +type TypesSshPubKey string + +type TypesUri string // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { @@ -4223,24 +4494,8 @@ func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) - } - *j = SpecDistributionModulesLoggingType(v) - return nil +var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ + "EKSCluster", } // UnmarshalJSON implements json.Unmarshaler. @@ -4263,25 +4518,7 @@ func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) - } - *j = SpecDistributionModulesLoggingOpensearchType(v) - return nil -} +type TypesKubeNodeSelector map[string]string // UnmarshalJSON implements json.Unmarshaler. func (j *Metadata) UnmarshalJSON(b []byte) error { diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index ff34c16a3..e59754a1f 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -8,7 +8,7 @@ import ( "reflect" ) -// A Fury Cluster deployed through AWS's Elastic Kubernetes Service +// A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS). type EksclusterKfdV1Alpha2 struct { // ApiVersion corresponds to the JSON schema field "apiVersion". ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` @@ -28,7 +28,8 @@ type EksclusterKfdV1Alpha2Kind string const EksclusterKfdV1Alpha2KindEKSCluster EksclusterKfdV1Alpha2Kind = "EKSCluster" type Metadata struct { - // Name corresponds to the JSON schema field "name". + // The name of the cluster. It will also be used as a prefix for all the other + // resources created. Name string `json:"name" yaml:"name" mapstructure:"name"` } @@ -36,7 +37,9 @@ type Spec struct { // Distribution corresponds to the JSON schema field "distribution". Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` - // DistributionVersion corresponds to the JSON schema field "distributionVersion". + // Defines which KFD version will be installed and, in consequence, the Kubernetes + // version used to create the cluster. It supports git tags and branches. Example: + // `v1.30.1`. DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` // Infrastructure corresponds to the JSON schema field "infrastructure". @@ -48,14 +51,15 @@ type Spec struct { // Plugins corresponds to the JSON schema field "plugins". Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` - // Region corresponds to the JSON schema field "region". + // Defines in which AWS region the cluster and all the related resources will be + // created. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` // This map defines which will be the common tags that will be added to all the // resources created on AWS. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // ToolsConfiguration corresponds to the JSON schema field "toolsConfiguration". + // Configuration for tools used by furyctl, like Terraform. ToolsConfiguration SpecToolsConfiguration `json:"toolsConfiguration" yaml:"toolsConfiguration" mapstructure:"toolsConfiguration"` } @@ -70,29 +74,35 @@ type SpecDistribution struct { Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` } +// Common configuration for all the distribution modules. 
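+//
+// Illustrative only (not part of the generated comment): these settings live
+// under `spec.distribution.common` in the `furyctl.yaml` file. A minimal
+// sketch, assuming an `infra` node role label, could look like:
+//
+// ```yaml
+// spec:
+//   distribution:
+//     common:
+//       nodeSelector:
+//         node.kubernetes.io/role: infra
+//       registry: registry.sighup.io/fury
+// ```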
 type SpecDistributionCommon struct {
-	// The node selector to use to place the pods for all the KFD modules
+	// The node selector to use to place the pods for all the KFD modules. Follows
+	// Kubernetes selector format. Example: `node.kubernetes.io/role: infra`.
 	NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`

 	// Provider corresponds to the JSON schema field "provider".
 	Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"`

 	// URL of the registry where to pull images from for the Distribution phase.
-	// (Default is registry.sighup.io/fury).
-	//
-	// NOTE: If plugins are pulling from the default registry, the registry will be
-	// replaced for these plugins too.
+	// (Default is `registry.sighup.io/fury`).
 	Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"`

-	// The relative path to the vendor directory, does not need to be changed
+	// The relative path to the vendor directory. It does not need to be changed.
 	RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"`

-	// The tolerations that will be added to the pods for all the KFD modules
+	// An array with the tolerations that will be added to the pods for all the KFD
+	// modules. Follows Kubernetes tolerations format. Example:
+	//
+	// ```yaml
+	// - effect: NoSchedule
+	//   key: node.kubernetes.io/role
+	//   value: infra
+	// ```
 	Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
 }

 type SpecDistributionCommonProvider struct {
-	// The type of the provider, must be EKS if specified
+	// The provider type. Don't set. FOR INTERNAL USE ONLY.
 	Type string `json:"type" yaml:"type" mapstructure:"type"`
 }

@@ -294,8 +304,11 @@ type SpecDistributionModules struct {
 	Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"`
 }

+// Configuration for the Auth module.
 type SpecDistributionModulesAuth struct {
-	// The base domain for the auth module
+	// Base domain for the ingresses created by the Auth module (Gangplank, Pomerium,
+	// Dex). Notice that when nginx type is dual, these will use the `external`
+	// ingress class.
 	BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"`

 	// Dex corresponds to the JSON schema field "dex".
@@ -311,11 +324,25 @@ type SpecDistributionModulesAuth struct {
 	Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
 }

+// Configuration for the Dex package.
 type SpecDistributionModulesAuthDex struct {
-	// The additional static clients for dex
+	// Additional static client definitions that will be added to the default clients
+	// included with the distribution in Dex's configuration. 
Example: + // + // ```yaml + // additionalStaticClients: + // - id: my-custom-client + // name: "A custom additional static client" + // redirectURIs: + // - "https://myapp.tld/redirect" + // - "https://alias.tld/oidc-callback" + // secret: supersecretpassword + // ``` + // Reference: https://dexidp.io/docs/connectors/local/ AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"` - // The connectors for dex + // A list with each item defining a Dex connector. Follows Dex connectors + // configuration format: https://dexidp.io/docs/connectors/ Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"` // Expiry corresponds to the JSON schema field "expiry". @@ -333,25 +360,29 @@ type SpecDistributionModulesAuthDexExpiry struct { SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"` } +// Override the common configuration with a particular configuration for the Auth +// module. type SpecDistributionModulesAuthOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". + // Override the definition of the Auth module ingresses. Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // The node selector to use to place the pods for the auth module + // Set to override the node selector used to place the pods of the Auth module. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The tolerations that will be added to the pods for the auth module + // Set to override the tolerations that will be added to the pods of the Auth + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionModulesAuthOverridesIngress struct { - // The host of the ingress + // Use this host for the ingress instead of the default one. Host string `json:"host" yaml:"host" mapstructure:"host"` - // The ingress class of the ingress + // Use this ingress class for the ingress instead of the default one. IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } +// Override the definition of the Auth module ingresses. type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress type SpecDistributionModulesAuthPomerium interface{} @@ -476,15 +507,23 @@ type SpecDistributionModulesAuthProvider struct { // BasicAuth corresponds to the JSON schema field "basicAuth". BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` - // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** + // The type of the Auth provider, options are: + // - `none`: will disable authentication in the infrastructural ingresses. + // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) + // and require authentication before accessing them. + // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth + // (username and password) authentication. + // + // Default is `none`. 
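+	//
+	// A minimal sketch (values are placeholders, not taken from this schema) of the
+	// `provider` block using the `basicAuth` type:
+	//
+	// ```yaml
+	// provider:
+	//   type: basicAuth
+	//   basicAuth:
+	//     username: admin
+	//     password: supersecretpassword
+	// ```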
Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the HTTP Basic Auth provider. type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth + // The password for logging in with the HTTP basic authentication. Password string `json:"password" yaml:"password" mapstructure:"password"` - // The username for the basic auth + // The username for logging in with the HTTP basic authentication. Username string `json:"username" yaml:"username" mapstructure:"username"` } @@ -535,11 +574,16 @@ type SpecDistributionModulesAwsLoadBalancerController struct { Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Disaster Recovery module. type SpecDistributionModulesDr struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of the DR, must be ***none*** or ***eks*** + // The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the + // module and `eks` will install Velero and use an S3 bucket to store the + // backups. + // + // Default is `none`. Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` // Velero corresponds to the JSON schema field "velero". @@ -565,10 +609,10 @@ type SpecDistributionModulesDrVelero struct { } type SpecDistributionModulesDrVeleroEks struct { - // The name of the velero bucket + // The name of the bucket for Velero. BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` - // The region where the velero bucket is located + // The region where the bucket for Velero will be located. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` } @@ -598,12 +642,15 @@ type SpecDistributionModulesDrVeleroSchedulesCron struct { } type SpecDistributionModulesIngress struct { - // the base domain used for all the KFD ingresses, if in the nginx dual - // configuration, it should be the same as the - // .spec.distribution.modules.ingress.dns.private.name zone + // The base domain used for all the KFD ingresses. If in the nginx `dual` + // configuration type, this value should be the same as the + // `.spec.distribution.modules.ingress.dns.private.name` zone. BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` - // CertManager corresponds to the JSON schema field "certManager". + // Configuration for the cert-manager package. Required even if + // `ingress.nginx.type` is `none`, cert-manager is used for managing other + // certificates in the cluster besides the TLS termination certificates for the + // ingresses. CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"` // Dns corresponds to the JSON schema field "dns". @@ -612,13 +659,17 @@ type SpecDistributionModulesIngress struct { // Forecastle corresponds to the JSON schema field "forecastle". Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` - // Configurations for the nginx ingress controller module + // Configurations for the Ingress nginx controller package. 
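+	//
+	// A minimal sketch (not part of the original comment) of a `dual` controller
+	// setup with TLS certificates handled by cert-manager:
+	//
+	// ```yaml
+	// nginx:
+	//   type: dual
+	//   tls:
+	//     provider: certManager
+	// ```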
Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager package. Required even if +// `ingress.nginx.type` is `none`, cert-manager is used for managing other +// certificates in the cluster besides the TLS termination certificates for the +// ingresses. type SpecDistributionModulesIngressCertManager struct { // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` @@ -627,17 +678,21 @@ type SpecDistributionModulesIngressCertManager struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager's ACME clusterIssuer used to request +// certificates from Let's Encrypt. type SpecDistributionModulesIngressCertManagerClusterIssuer struct { - // The email of the cluster issuer + // The email address to use during the certificate issuing process. Email string `json:"email" yaml:"email" mapstructure:"email"` - // The name of the cluster issuer + // Name of the clusterIssuer. Name string `json:"name" yaml:"name" mapstructure:"name"` - // The custom solvers configurations + // List of challenge solvers to use instead of the default one for the `http01` + // challenge. Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` - // The type of the cluster issuer, must be ***dns01*** or ***http01*** + // The type of the clusterIssuer, must be `dns01` for using DNS challenge or + // `http01` for using HTTP challenge. Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } @@ -648,6 +703,8 @@ const ( SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01" ) +// DNS definition, used in conjunction with `externalDNS` package to automate DNS +// management and certificates emission. type SpecDistributionModulesIngressDNS struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -659,19 +716,23 @@ type SpecDistributionModulesIngressDNS struct { Public SpecDistributionModulesIngressDNSPublic `json:"public" yaml:"public" mapstructure:"public"` } +// The private DNS zone is used only when `ingress.nginx.type` is `dual`, for +// exposing infrastructural services only in the private DNS zone. type SpecDistributionModulesIngressDNSPrivate struct { - // If true, the private hosted zone will be created + // By default, a Terraform data source will be used to get the private DNS zone. + // Set to `true` to create the private zone instead. Create bool `json:"create" yaml:"create" mapstructure:"create"` - // The name of the private hosted zone + // The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`. 
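+	//
+	// An illustrative example (placeholder values) of a `dns` configuration for the
+	// nginx `dual` type:
+	//
+	// ```yaml
+	// dns:
+	//   public:
+	//     name: "fury-demo.sighup.io"
+	//     create: false
+	//   private:
+	//     name: "internal.fury-demo.sighup.io"
+	//     create: true
+	// ```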
	Name string `json:"name" yaml:"name" mapstructure:"name"`
}

type SpecDistributionModulesIngressDNSPublic struct {
-	// If true, the public hosted zone will be created
+	// By default, a Terraform data source will be used to get the public DNS zone.
+	// Set to `true` to create the public zone instead.
	Create bool `json:"create" yaml:"create" mapstructure:"create"`

-	// The name of the public hosted zone
+	// The name of the public hosted zone.
	Name string `json:"name" yaml:"name" mapstructure:"name"`
}

@@ -687,14 +748,24 @@ type SpecDistributionModulesIngressNginx struct {
	// Tls corresponds to the JSON schema field "tls".
	Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"`

-	// The type of the nginx ingress controller, must be ***none***, ***single*** or
-	// ***dual***
+	// The type of the Ingress nginx controller, options are:
+	// - `none`: no ingress controller will be installed and no infrastructural
+	// ingresses will be created.
+	// - `single`: a single ingress controller with ingress class `nginx` will be
+	// installed to manage all the ingress resources, infrastructural ingresses will
+	// be created.
+	// - `dual`: two independent ingress controllers will be installed, one for the
+	// `internal` ingress class intended for private ingresses and one for the
+	// `external` ingress class intended for public ingresses. KFD infrastructural
+	// ingresses will use the `internal` ingress class when using the dual type.
+	//
+	// Default is `single`.
	Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"`
}

type SpecDistributionModulesIngressNginxTLS struct {
-	// The provider of the TLS certificate, must be ***none***, ***certManager*** or
-	// ***secret***
+	// The provider of the TLS certificates for the ingresses, one of: `none`,
+	// `certManager`, or `secret`.
	Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"`

	// Secret corresponds to the JSON schema field "secret".
@@ -709,15 +780,18 @@ const (
	SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret"
)

+// Kubernetes TLS secret for the ingresses TLS certificate.
type SpecDistributionModulesIngressNginxTLSSecret struct {
-	// Ca corresponds to the JSON schema field "ca".
+	// The Certificate Authority certificate file's content. You can use the
+	// `"{file://<path>}"` notation to get the content from a file.
	Ca string `json:"ca" yaml:"ca" mapstructure:"ca"`

-	// The certificate file content or you can use the file notation to get the
-	// content from a file
+	// The certificate file's content. You can use the `"{file://<path>}"` notation to
+	// get the content from a file.
	Cert string `json:"cert" yaml:"cert" mapstructure:"cert"`

-	// Key corresponds to the JSON schema field "key".
+	// The signing key file's content. You can use the `"{file://<path>}"` notation to
+	// get the content from a file.
	Key string `json:"key" yaml:"key" mapstructure:"key"`
}

@@ -729,14 +803,17 @@ const (
	SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single"
)

+// Override the common configuration with a particular configuration for the
+// Ingress module.
type SpecDistributionModulesIngressOverrides struct {
	// Ingresses corresponds to the JSON schema field "ingresses".
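+	//
+	// An illustrative example (placeholder values) overriding the host and ingress
+	// class of the Forecastle ingress:
+	//
+	// ```yaml
+	// overrides:
+	//   ingresses:
+	//     forecastle:
+	//       host: forecastle.internal.fury-demo.sighup.io
+	//       ingressClass: internal
+	// ```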
	Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`

-	// The node selector to use to place the pods for the ingress module
+	// Set to override the node selector used to place the pods of the Ingress module.
	NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`

-	// The tolerations that will be added to the pods for the ingress module
+	// Set to override the tolerations that will be added to the pods of the Ingress
+	// module.
	Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}

@@ -745,6 +822,7 @@ type SpecDistributionModulesIngressOverridesIngresses struct {
	Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`
}

+// Configuration for the Logging module.
type SpecDistributionModulesLogging struct {
	// Cerebro corresponds to the JSON schema field "cerebro".
	Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"`

@@ -767,79 +845,87 @@ type SpecDistributionModulesLogging struct {
	// Overrides corresponds to the JSON schema field "overrides".
	Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`

-	// selects the logging stack. Choosing none will disable the centralized logging.
-	// Choosing opensearch will deploy and configure the Logging Operator and an
+	// Selects the logging stack. Options are:
+	// - `none`: will disable the centralized logging.
+	// - `opensearch`: will deploy and configure the Logging Operator and an
	// OpenSearch cluster (can be single or triple for HA) where the logs will be
-	// stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh
-	// for storage. Choosing customOuput the Logging Operator will be deployed and
-	// installed but with no local storage, you will have to create the needed Outputs
-	// and ClusterOutputs to ship the logs to your desired storage.
+	// stored.
+	// - `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.
+	// - `customOutputs`: the Logging Operator will be deployed and installed but with
+	// no local storage: you will have to create the needed Outputs and ClusterOutputs
+	// to ship the logs to your desired storage.
+	//
+	// Default is `opensearch`.
	Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"`
}

+// DEPRECATED since KFD v1.26.6, v1.27.5, v1.28.0.
type SpecDistributionModulesLoggingCerebro struct {
	// Overrides corresponds to the JSON schema field "overrides".
	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}

-// when using the customOutputs logging type, you need to manually specify the spec
-// of the several Output and ClusterOutputs that the Logging Operator expects to
-// forward the logs collected by the pre-defined flows.
+// When using the `customOutputs` logging type, you need to manually specify the
+// spec of the several `Output` and `ClusterOutputs` that the Logging Operator
+// expects to forward the logs collected by the pre-defined flows.
type SpecDistributionModulesLoggingCustomOutputs struct {
-	// This value defines where the output from Flow will be sent.
Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `audit` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `errors` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `events` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Events string `json:"events" yaml:"events" mapstructure:"events"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `infra` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` + // output to discard the flow: `nullout: {}` Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdEtcd` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` } +// Configuration for the Loki package. type SpecDistributionModulesLoggingLoki struct { - // Backend corresponds to the JSON schema field "backend". + // The storage backend type for Loki. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Loki's external storage backend. ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Resources corresponds to the JSON schema field "resources". @@ -853,23 +939,25 @@ const ( SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" ) +// Configuration for Loki's external storage backend. type SpecDistributionModulesLoggingLokiExternalEndpoint struct { - // The access key id of the loki external endpoint + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the loki external endpoint + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the loki external endpoint + // External S3-compatible endpoint for Loki's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the loki external endpoint will be insecure + // If true, will use HTTP as protocol instead of HTTPS. 
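+	//
+	// An illustrative example (placeholder values) of a Loki configuration using an
+	// external S3-compatible object storage:
+	//
+	// ```yaml
+	// loki:
+	//   backend: externalEndpoint
+	//   externalEndpoint:
+	//     endpoint: s3.example.com
+	//     bucketName: loki-logs
+	//     accessKeyId: exampleAccessKeyId
+	//     secretAccessKey: exampleSecretAccessKey
+	//     insecure: false
+	// ```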
Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the loki external endpoint + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for Logging's MinIO deployment. type SpecDistributionModulesLoggingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -877,15 +965,15 @@ type SpecDistributionModulesLoggingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The PVC size for each minio disk, 6 disks total + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesLoggingMinioRootUser struct { - // The password of the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username of the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -896,10 +984,11 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the opensearch pods + // The storage size for the OpenSearch volumes. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` - // The type of the opensearch, must be ***single*** or ***triple*** + // The type of OpenSearch deployment. One of: `single` for a single replica or + // `triple` for an HA 3-replicas deployment. Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` } @@ -910,6 +999,7 @@ const ( SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" ) +// Configuration for the Logging Operator. type SpecDistributionModulesLoggingOperator struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -924,7 +1014,7 @@ const ( SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" ) -// configuration for the Monitoring module components +// Configuration for the Monitoring module. type SpecDistributionModulesMonitoring struct { // Alertmanager corresponds to the JSON schema field "alertmanager". Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` @@ -953,12 +1043,12 @@ type SpecDistributionModulesMonitoring struct { // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". 
	PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"`

-	// The type of the monitoring, must be ***none***, ***prometheus***,
-	// ***prometheusAgent*** or ***mimir***.
+	// The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or
+	// `mimir`.
	//
	// - `none`: will disable the whole monitoring stack.
	// - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus
-	// instance, Alertmanager, a set of alert rules, exporters needed to monitor all
+	// instance, Alertmanager, a set of alert rules, exporters needed to monitor all
	// the components of the cluster, Grafana and a series of dashboards to view the
	// collected metrics, and more.
	// - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus
	// in Agent mode (no alerting, no queries, no dashboards), and all the exporters
	// needed to get metrics for the status of the cluster and the workloads. Useful
	// when having a centralized (remote) Prometheus where to ship the metrics and not
	// storing them locally in the cluster.
-	// - `mimir`: will install the same as the `prometheus` option, and in addition
-	// Grafana Mimir that allows for longer retention of metrics and the usage of
-	// Object Storage.
+	// - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir
+	// that allows for longer retention of metrics and the usage of Object Storage.
+	//
+	// Default is `prometheus`.
	Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"`

	// X509Exporter corresponds to the JSON schema field "x509Exporter".
	X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"`
}

type SpecDistributionModulesMonitoringAlertManager struct {
-	// The webhook url to send deadman switch monitoring, for example to use with
-	// healthchecks.io
+	// The webhook URL to send dead man's switch monitoring, for example to use with
+	// healthchecks.io.
	DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"`

-	// If true, the default rules will be installed
+	// Set to false to avoid installing the Prometheus rules (alerts) included with
+	// the distribution.
	InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"`

-	// The slack webhook url to send alerts
+	// The Slack webhook URL where to send the infrastructural and workload alerts to.
	SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"`
}

@@ -1022,17 +1114,22 @@ type SpecDistributionModulesMonitoringKubeStateMetrics struct {
	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}

+// Configuration for the Mimir package.
type SpecDistributionModulesMonitoringMimir struct {
-	// The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***
+	// The storage backend type for Mimir. `minio` will use an in-cluster MinIO
+	// deployment for object storage, `externalEndpoint` can be used to point to an
+	// external S3-compatible object storage instead of deploying an in-cluster MinIO.
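+	//
+	// An illustrative example (placeholder values) of a `monitoring` section using
+	// Mimir with the in-cluster MinIO as storage backend:
+	//
+	// ```yaml
+	// monitoring:
+	//   type: mimir
+	//   mimir:
+	//     backend: minio
+	//     retentionTime: 30d
+	// ```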
	Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`

-	// ExternalEndpoint corresponds to the JSON schema field "externalEndpoint".
+	// Configuration for Mimir's external storage backend.
	ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`

	// Overrides corresponds to the JSON schema field "overrides".
	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`

-	// The retention time for the mimir pods
+	// The retention time for the metrics stored in Mimir. Default is `30d`. Value must
+	// match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365
+	// days.
	RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
}

@@ -1043,23 +1140,25 @@ const (
	SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio"
)

+// Configuration for Mimir's external storage backend.
type SpecDistributionModulesMonitoringMimirExternalEndpoint struct {
-	// The access key id of the external mimir backend
+	// The access key ID (username) for the external S3-compatible bucket.
	AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`

-	// The bucket name of the external mimir backend
+	// The bucket name of the external S3-compatible object storage.
	BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`

-	// The endpoint of the external mimir backend
+	// External S3-compatible endpoint for Mimir's storage.
	Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`

-	// If true, the external mimir backend will not use tls
+	// If true, will use HTTP as protocol instead of HTTPS.
	Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`

-	// The secret access key of the external mimir backend
+	// The secret access key (password) for the external S3-compatible bucket.
	SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
}

+// Configuration for Monitoring's MinIO deployment.
type SpecDistributionModulesMonitoringMinio struct {
	// Overrides corresponds to the JSON schema field "overrides".
	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
@@ -1067,15 +1166,15 @@ type SpecDistributionModulesMonitoringMinio struct {
	// RootUser corresponds to the JSON schema field "rootUser".
	RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`

-	// The storage size for the minio pods
+	// The PVC size for each MinIO disk, 6 disks total.
	StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
}

type SpecDistributionModulesMonitoringMinioRootUser struct {
-	// The password for the minio root user
+	// The password for the default MinIO root user.
Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username for the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -1092,13 +1191,13 @@ type SpecDistributionModulesMonitoringPrometheus struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The retention size for the k8s Prometheus instance. + // The retention size for the `k8s` Prometheus instance. RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - // The retention time for the k8s Prometheus instance. + // The retention time for the `k8s` Prometheus instance. RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - // The storage size for the k8s Prometheus instance. + // The storage size for the `k8s` Prometheus instance. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } @@ -1134,9 +1233,10 @@ type SpecDistributionModulesMonitoringX509Exporter struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Networking module. type SpecDistributionModulesNetworking struct { // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` // TigeraOperator corresponds to the JSON schema field "tigeraOperator". TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` @@ -1147,6 +1247,7 @@ type SpecDistributionModulesNetworkingTigeraOperator struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Policy module. type SpecDistributionModulesPolicy struct { // Gatekeeper corresponds to the JSON schema field "gatekeeper". Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` @@ -1157,20 +1258,27 @@ type SpecDistributionModulesPolicy struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of security to use, either ***none***, ***gatekeeper*** or - // ***kyverno*** + // The type of policy enforcement to use, either `none`, `gatekeeper` or + // `kyverno`. + // + // Default is `none`. Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the Gatekeeper package. type SpecDistributionModulesPolicyGatekeeper struct { // This parameter adds namespaces to Gatekeeper's exemption list, so it will not // enforce the constraints on them. 
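+	//
+	// An illustrative example (placeholder values) of a `policy` section using
+	// Gatekeeper:
+	//
+	// ```yaml
+	// policy:
+	//   type: gatekeeper
+	//   gatekeeper:
+	//     additionalExcludedNamespaces: ["my-namespace"]
+	//     enforcementAction: deny
+	//     installDefaultPolicies: true
+	// ```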
AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - // The enforcement action to use for the gatekeeper module + // The default enforcement action to use for the included constraints. `deny` will + // block the admission when violations to the policies are found, `warn` will show + // a message to the user but will admit the violating requests and `dryrun` won't + // give any feedback to the user but it will log the violations. EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - // If true, the default policies will be installed + // Set to `false` to avoid installing the default Gatekeeper policies (constraints + // templates and constraints) included with the distribution. InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` // Overrides corresponds to the JSON schema field "overrides". @@ -1185,18 +1293,22 @@ const ( SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" ) +// Configuration for the Kyverno package. type SpecDistributionModulesPolicyKyverno struct { // This parameter adds namespaces to Kyverno's exemption list, so it will not - // enforce the constraints on them. + // enforce the policies on them. AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - // If true, the default policies will be installed + // Set to `false` to avoid installing the default Kyverno policies included with + // distribution. InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The validation failure action to use for the kyverno module + // The validation failure action to use for the policies, `Enforce` will block + // when a request does not comply with the policies and `Audit` will not block but + // log when a request does not comply with the policies. ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` } @@ -1215,6 +1327,7 @@ const ( SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" ) +// Configuration for the Tracing module. type SpecDistributionModulesTracing struct { // Minio corresponds to the JSON schema field "minio". Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` @@ -1225,10 +1338,14 @@ type SpecDistributionModulesTracing struct { // Tempo corresponds to the JSON schema field "tempo". Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` - // The type of tracing to use, either ***none*** or ***tempo*** + // The type of tracing to use, either `none` or `tempo`. `none` will disable the + // Tracing module and `tempo` will install a Grafana Tempo deployment. + // + // Default is `tempo`. 
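+	//
+	// An illustrative example (placeholder values) of a `tracing` section using
+	// Tempo with the in-cluster MinIO as storage backend:
+	//
+	// ```yaml
+	// tracing:
+	//   type: tempo
+	//   tempo:
+	//     backend: minio
+	//     retentionTime: 48h
+	// ```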
Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for Tracing's MinIO deployment. type SpecDistributionModulesTracingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1236,29 +1353,32 @@ type SpecDistributionModulesTracingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The storage size for the minio pods + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username for the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } +// Configuration for the Tempo package. type SpecDistributionModulesTracingTempo struct { - // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** + // The storage backend type for Tempo. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Tempo's external storage backend. ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The retention time for the tempo pods + // The retention time for the traces stored in Tempo. RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` } @@ -1269,20 +1389,21 @@ const ( SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" ) +// Configuration for Tempo's external storage backend. type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key id of the external tempo backend + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the external tempo backend + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the external tempo backend + // External S3-compatible endpoint for Tempo's storage. 
	Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`

-	// If true, the external tempo backend will not use tls
+	// If true, will use HTTP as protocol instead of HTTPS.
	Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`

-	// The secret access key of the external tempo backend
+	// The secret access key (password) for the external S3-compatible bucket.
	SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
}

@@ -1294,88 +1415,98 @@ const (
)

type SpecInfrastructure struct {
-	// This key defines the VPC that will be created in AWS
+	// Vpc corresponds to the JSON schema field "vpc".
	Vpc *SpecInfrastructureVpc `json:"vpc,omitempty" yaml:"vpc,omitempty" mapstructure:"vpc,omitempty"`

-	// This section defines the creation of VPN bastions
+	// Vpn corresponds to the JSON schema field "vpn".
	Vpn *SpecInfrastructureVpn `json:"vpn,omitempty" yaml:"vpn,omitempty" mapstructure:"vpn,omitempty"`
}

+// Configuration for the VPC that will be created to host the EKS cluster and its
+// related resources. If you already have a VPC that you want to use, leave this
+// section empty and use `.spec.kubernetes.vpcId` instead.
type SpecInfrastructureVpc struct {
	// Network corresponds to the JSON schema field "network".
	Network SpecInfrastructureVpcNetwork `json:"network" yaml:"network" mapstructure:"network"`
}

type SpecInfrastructureVpcNetwork struct {
-	// This is the CIDR of the VPC that will be created
+	// The network CIDR for the VPC that will be created.
	Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"`

	// SubnetsCidrs corresponds to the JSON schema field "subnetsCidrs".
	SubnetsCidrs SpecInfrastructureVpcNetworkSubnetsCidrs `json:"subnetsCidrs" yaml:"subnetsCidrs" mapstructure:"subnetsCidrs"`
}

+// Network CIDRs configuration for private and public subnets.
type SpecInfrastructureVpcNetworkSubnetsCidrs struct {
-	// These are the CIRDs for the private subnets, where the nodes, the pods, and the
+	// Network CIDRs for the private subnets, where the nodes, the pods, and the
	// private load balancers will be created
	Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"`

-	// These are the CIDRs for the public subnets, where the public load balancers and
-	// the VPN servers will be created
+	// Network CIDRs for the public subnets, where the public load balancers and the
+	// VPN servers will be created.
	Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"`
}

+// Configuration for the VPN server instances.
type SpecInfrastructureVpn struct {
-	// This value defines the prefix that will be used to create the bucket name where
-	// the VPN servers will store the states
+	// This value defines the prefix for the bucket name where the VPN servers will
+	// store their state (VPN certificates, users).
	BucketNamePrefix *TypesAwsS3BucketNamePrefix `json:"bucketNamePrefix,omitempty" yaml:"bucketNamePrefix,omitempty" mapstructure:"bucketNamePrefix,omitempty"`

-	// The dhParamsBits size used for the creation of the .pem file that will be used
-	// in the dh openvpn server.conf file
+	// The `dhParamsBits` size used for the creation of the .pem file that will be
+	// used in the dh openvpn server.conf file.
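+	//
+	// An illustrative example (placeholder values) of an `infrastructure` section
+	// creating a VPC and a single VPN server:
+	//
+	// ```yaml
+	// infrastructure:
+	//   vpc:
+	//     network:
+	//       cidr: 10.150.0.0/16
+	//       subnetsCidrs:
+	//         private:
+	//           - 10.150.0.0/20
+	//           - 10.150.16.0/20
+	//           - 10.150.32.0/20
+	//         public:
+	//           - 10.150.48.0/24
+	//           - 10.150.49.0/24
+	//           - 10.150.50.0/24
+	//   vpn:
+	//     instances: 1
+	//     vpnClientsSubnetCidr: 172.16.0.0/16
+	//     ssh:
+	//       allowedFromCidrs: ["0.0.0.0/0"]
+	//       githubUsersName: ["exampleUser"]
+	// ```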
	DhParamsBits *int `json:"dhParamsBits,omitempty" yaml:"dhParamsBits,omitempty" mapstructure:"dhParamsBits,omitempty"`

-	// The size of the disk in GB
+	// The size of the disk in GB for each VPN server. Example: entering `50` will
+	// create disks of 50 GB.
	DiskSize *int `json:"diskSize,omitempty" yaml:"diskSize,omitempty" mapstructure:"diskSize,omitempty"`

-	// Overrides the default IAM user name for the VPN
+	// Overrides the IAM user name for the VPN. Default is to use the cluster name.
	IamUserNameOverride *TypesAwsIamRoleName `json:"iamUserNameOverride,omitempty" yaml:"iamUserNameOverride,omitempty" mapstructure:"iamUserNameOverride,omitempty"`

-	// The size of the AWS EC2 instance
+	// The type of the AWS EC2 instance for each VPN server. Follows AWS EC2
+	// nomenclature. Example: `t3.micro`.
	InstanceType *string `json:"instanceType,omitempty" yaml:"instanceType,omitempty" mapstructure:"instanceType,omitempty"`

-	// The number of instances to create, 0 to skip the creation
+	// The number of VPN server instances to create, `0` to skip the creation.
	Instances *int `json:"instances,omitempty" yaml:"instances,omitempty" mapstructure:"instances,omitempty"`

-	// The username of the account to create in the bastion's operating system
+	// The username of the account to create in the bastion's operating system.
	OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"`

-	// The port used by the OpenVPN server
+	// The port where each OpenVPN server will listen for connections.
	Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"`

	// Ssh corresponds to the JSON schema field "ssh".
	Ssh SpecInfrastructureVpnSsh `json:"ssh" yaml:"ssh" mapstructure:"ssh"`

-	// The VPC ID where the VPN servers will be created, required only if
-	// .spec.infrastructure.vpc is omitted
+	// The ID of the VPC where the VPN server instances will be created, required only
+	// if `.spec.infrastructure.vpc` is omitted.
	VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"`

-	// The CIDR that will be used to assign IP addresses to the VPN clients when
-	// connected
+	// The network CIDR that will be used to assign IP addresses to the VPN clients
+	// when connected.
	VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"`
}

type SpecInfrastructureVpnSsh struct {
-	// The CIDR enabled in the security group that can access the bastions in SSH
+	// The network CIDR enabled in the security group to access the VPN servers
+	// (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source.
	AllowedFromCidrs []TypesCidr `json:"allowedFromCidrs" yaml:"allowedFromCidrs" mapstructure:"allowedFromCidrs"`

-	// The github user name list that will be used to get the ssh public key that will
-	// be added as authorized key to the operatorName user
+	// List of GitHub usernames whose SSH public keys will be fetched and added as
+	// authorized keys for the `operatorName` user.
	GithubUsersName []string `json:"githubUsersName" yaml:"githubUsersName" mapstructure:"githubUsersName"`

-	// This value defines the public keys that will be added to the bastion's
-	// operating system NOTES: Not yet implemented
+	// **NOT IN USE**, use `githubUsersName` instead. This value defines the public
+	// keys that will be added to the bastion's operating system.
	PublicKeys []interface{} `json:"publicKeys,omitempty" yaml:"publicKeys,omitempty" mapstructure:"publicKeys,omitempty"`
}

+// Defines the Kubernetes components configuration and the values needed for the
+// `kubernetes` phase of furyctl.
type SpecKubernetes struct {
	// ApiServer corresponds to the JSON schema field "apiServer".
	ApiServer SpecKubernetesAPIServer `json:"apiServer" yaml:"apiServer" mapstructure:"apiServer"`
@@ -1383,71 +1514,81 @@ type SpecKubernetes struct {
	// AwsAuth corresponds to the JSON schema field "awsAuth".
	AwsAuth *SpecKubernetesAwsAuth `json:"awsAuth,omitempty" yaml:"awsAuth,omitempty" mapstructure:"awsAuth,omitempty"`

-	// Overrides the default IAM role name prefix for the EKS cluster
+	// Overrides the default prefix for the IAM role name of the EKS cluster. If not
+	// set, a name will be generated from the cluster name.
	ClusterIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"clusterIAMRoleNamePrefixOverride,omitempty" yaml:"clusterIAMRoleNamePrefixOverride,omitempty" mapstructure:"clusterIAMRoleNamePrefixOverride,omitempty"`

-	// Optional Kubernetes Cluster log retention in days. Defaults to 90 days.
-	LogRetentionDays *int `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"`
+	// Optional Kubernetes Cluster log retention in CloudWatch, expressed in days.
+	// Setting the value to zero (`0`) makes retention last forever. Default is `90`
+	// days.
+	LogRetentionDays *SpecKubernetesLogRetentionDays `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"`

	// Optional list of Kubernetes Cluster log types to enable. Defaults to all types.
	LogsTypes []SpecKubernetesLogsTypesElem `json:"logsTypes,omitempty" yaml:"logsTypes,omitempty" mapstructure:"logsTypes,omitempty"`

-	// This key contains the ssh public key that can connect to the nodes via SSH
-	// using the ec2-user user
+	// The SSH public key that can connect to the nodes via SSH using the `ec2-user`
+	// user. Example: the contents of your `~/.ssh/id_rsa.pub` file.
	NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"`

	// NodePools corresponds to the JSON schema field "nodePools".
	NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"`

-	// Either `launch_configurations`, `launch_templates` or `both`. For new clusters
-	// use `launch_templates`, for existing cluster you'll need to migrate from
-	// `launch_configurations` to `launch_templates` using `both` as interim.
+	// Accepted values are `launch_configurations`, `launch_templates` or `both`. For
+	// new clusters use `launch_templates`, for adopting an existing cluster you'll
+	// need to migrate from `launch_configurations` to `launch_templates` using `both`
+	// as interim.
	NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"`

-	// This value defines the CIDR that will be used to assign IP addresses to the
-	// services
+	// This value defines the network CIDR that will be used to assign IP addresses to
+	// Kubernetes services.
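+	//
+	// An illustrative example (placeholder values) of these `kubernetes` fields:
+	//
+	// ```yaml
+	// kubernetes:
+	//   nodeAllowedSshPublicKey: "ssh-ed25519 AAAAC3NzaC1... user@example"
+	//   nodePoolsLaunchKind: launch_templates
+	//   serviceIpV4Cidr: 172.16.200.0/24
+	// ```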
	ServiceIpV4Cidr *TypesCidr `json:"serviceIpV4Cidr,omitempty" yaml:"serviceIpV4Cidr,omitempty" mapstructure:"serviceIpV4Cidr,omitempty"`

-	// This value defines the subnet IDs where the EKS cluster will be created,
-	// required only if .spec.infrastructure.vpc is omitted
+	// Required only if `.spec.infrastructure.vpc` is omitted. This value defines the
+	// IDs of the subnets where the EKS cluster will be created.
	SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"`

-	// This value defines the VPC ID where the EKS cluster will be created, required
-	// only if .spec.infrastructure.vpc is omitted
+	// Required only if `.spec.infrastructure.vpc` is omitted. This value defines the
+	// ID of the VPC where the EKS cluster and its related resources will be created.
	VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"`

-	// Overrides the default IAM role name prefix for the EKS workers
+	// Overrides the default prefix for the IAM role name of the EKS workers. If not
+	// set, a name will be generated from the cluster name.
	WorkersIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"workersIAMRoleNamePrefixOverride,omitempty" yaml:"workersIAMRoleNamePrefixOverride,omitempty" mapstructure:"workersIAMRoleNamePrefixOverride,omitempty"`
}

type SpecKubernetesAPIServer struct {
-	// This value defines if the API server will be accessible only from the private
-	// subnets
+	// This value defines if the Kubernetes API server will be accessible from the
+	// private subnets. Default is `true`.
	PrivateAccess bool `json:"privateAccess" yaml:"privateAccess" mapstructure:"privateAccess"`

-	// This value defines the CIDRs that will be allowed to access the API server from
-	// the private subnets
+	// The network CIDRs from the private subnets that will be allowed to access the
+	// Kubernetes API server.
	PrivateAccessCidrs []TypesCidr `json:"privateAccessCidrs,omitempty" yaml:"privateAccessCidrs,omitempty" mapstructure:"privateAccessCidrs,omitempty"`

-	// This value defines if the API server will be accessible from the public subnets
+	// This value defines if the Kubernetes API server will be accessible from the
+	// public subnets. Default is `false`.
	PublicAccess bool `json:"publicAccess" yaml:"publicAccess" mapstructure:"publicAccess"`

-	// This value defines the CIDRs that will be allowed to access the API server from
-	// the public subnets
+	// The network CIDRs from the public subnets that will be allowed to access the
+	// Kubernetes API server.
	PublicAccessCidrs []TypesCidr `json:"publicAccessCidrs,omitempty" yaml:"publicAccessCidrs,omitempty" mapstructure:"publicAccessCidrs,omitempty"`
}

+// Optional additional security configuration for EKS IAM via the `aws-auth`
+// configmap.
+//
+// Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html
type SpecKubernetesAwsAuth struct {
	// This optional array defines additional AWS accounts that will be added to the
-	// aws-auth configmap
+	// `aws-auth` configmap.
	AdditionalAccounts []string `json:"additionalAccounts,omitempty" yaml:"additionalAccounts,omitempty" mapstructure:"additionalAccounts,omitempty"`

	// This optional array defines additional IAM roles that will be added to the
-	// aws-auth configmap
+	// `aws-auth` configmap.
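+	//
+	// An illustrative example (placeholder values) of an `awsAuth` entry mapping an
+	// IAM role to the `system:masters` group:
+	//
+	// ```yaml
+	// awsAuth:
+	//   roles:
+	//     - username: admin
+	//       groups: ["system:masters"]
+	//       rolearn: arn:aws:iam::123456789012:role/example-admin-role
+	// ```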
Roles []SpecKubernetesAwsAuthRole `json:"roles,omitempty" yaml:"roles,omitempty" mapstructure:"roles,omitempty"` // This optional array defines additional IAM users that will be added to the - // aws-auth configmap + // `aws-auth` configmap. Users []SpecKubernetesAwsAuthUser `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` } @@ -1473,6 +1614,8 @@ type SpecKubernetesAwsAuthUser struct { Username string `json:"username" yaml:"username" mapstructure:"username"` } +type SpecKubernetesLogRetentionDays int + type SpecKubernetesLogsTypesElem string const ( @@ -1483,6 +1626,8 @@ const ( SpecKubernetesLogsTypesElemScheduler SpecKubernetesLogsTypesElem = "scheduler" ) +// Array with all the node pool definitions that will join the cluster. Each item +// is an object. type SpecKubernetesNodePool struct { // AdditionalFirewallRules corresponds to the JSON schema field // "additionalFirewallRules". @@ -1492,31 +1637,32 @@ type SpecKubernetesNodePool struct { Ami *SpecKubernetesNodePoolAmi `json:"ami,omitempty" yaml:"ami,omitempty" mapstructure:"ami,omitempty"` // This optional array defines additional target groups to attach to the instances - // in the node pool + // in the node pool. AttachedTargetGroups []TypesAwsArn `json:"attachedTargetGroups,omitempty" yaml:"attachedTargetGroups,omitempty" mapstructure:"attachedTargetGroups,omitempty"` - // The container runtime to use for the nodes + // The container runtime to use in the nodes of the node pool. Default is + // `containerd`. ContainerRuntime *SpecKubernetesNodePoolContainerRuntime `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty" mapstructure:"containerRuntime,omitempty"` // Instance corresponds to the JSON schema field "instance". Instance SpecKubernetesNodePoolInstance `json:"instance" yaml:"instance" mapstructure:"instance"` - // Kubernetes labels that will be added to the nodes + // Kubernetes labels that will be added to the nodes. Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` - // The name of the node pool + // The name of the node pool. Name string `json:"name" yaml:"name" mapstructure:"name"` // Size corresponds to the JSON schema field "size". Size SpecKubernetesNodePoolSize `json:"size" yaml:"size" mapstructure:"size"` - // This value defines the subnet IDs where the nodes will be created + // Optional list of subnet IDs where to create the nodes. SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` - // AWS tags that will be added to the ASG and EC2 instances + // AWS tags that will be added to the ASG and EC2 instances. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // Kubernetes taints that will be added to the nodes + // Kubernetes taints that will be added to the nodes. Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` // Type corresponds to the JSON schema field "type". @@ -1536,10 +1682,11 @@ type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { // Protocol corresponds to the JSON schema field "protocol". Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - // Tags corresponds to the JSON schema field "tags". + // Additional AWS tags for the Firewall rule. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // Type corresponds to the JSON schema field "type". 
+ // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1550,6 +1697,7 @@ const ( SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress" ) +// Port range for the Firewall Rule. type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { // From corresponds to the JSON schema field "from". From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"` @@ -1559,22 +1707,23 @@ type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { } type SpecKubernetesNodePoolAdditionalFirewallRuleSelf struct { - // The name of the FW rule + // The name of the Firewall rule. Name string `json:"name" yaml:"name" mapstructure:"name"` // Ports corresponds to the JSON schema field "ports". Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - // The protocol of the FW rule + // The protocol of the Firewall rule. Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - // If true, the source will be the security group itself + // If `true`, the source will be the security group itself. Self bool `json:"self" yaml:"self" mapstructure:"self"` - // The tags of the FW rule + // Additional AWS tags for the Firewall rule. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // The type of the FW rule can be ingress or egress + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. Type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1586,22 +1735,23 @@ const ( ) type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct { - // The name of the FW rule + // The name for the additional Firewall rule Security Group. Name string `json:"name" yaml:"name" mapstructure:"name"` // Ports corresponds to the JSON schema field "ports". Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - // The protocol of the FW rule + // The protocol of the Firewall rule. Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - // The source security group ID + // The source security group ID. SourceSecurityGroupId string `json:"sourceSecurityGroupId" yaml:"sourceSecurityGroupId" mapstructure:"sourceSecurityGroupId"` - // The tags of the FW rule + // Additional AWS tags for the Firewall rule. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // The type of the FW rule can be ingress or egress + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. Type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1612,9 +1762,11 @@ const ( SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress" ) +// Optional additional firewall rules that will be attached to the nodes. type SpecKubernetesNodePoolAdditionalFirewallRules struct { - // The CIDR blocks for the FW rule. At the moment the first item of the list will - // be used, others will be ignored. 
+ // The CIDR blocks objects definition for the Firewall rule. Even though it is a + // list, only one item is currently supported. See + // https://github.com/sighupio/fury-eks-installer/issues/46 for more details. CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"` // Self corresponds to the JSON schema field "self". @@ -1626,10 +1778,10 @@ type SpecKubernetesNodePoolAdditionalFirewallRules struct { } type SpecKubernetesNodePoolAmi struct { - // The AMI ID to use for the nodes + // Optional. Custom AMI ID to use for the nodes. Id string `json:"id" yaml:"id" mapstructure:"id"` - // The owner of the AMI + // Optional. The owner of the custom AMI. Owner string `json:"owner" yaml:"owner" mapstructure:"owner"` } @@ -1640,20 +1792,25 @@ const ( SpecKubernetesNodePoolContainerRuntimeDocker SpecKubernetesNodePoolContainerRuntime = "docker" ) +// Configuration for the instances that will be used in the node pool. type SpecKubernetesNodePoolInstance struct { - // MaxPods corresponds to the JSON schema field "maxPods". + // Set the maximum pods per node to a custom value. If not set will use EKS + // default value that depends on the instance type. + // + // Ref: + // https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"` - // If true, the nodes will be created as spot instances + // If `true`, the nodes will be created as spot instances. Default is `false`. Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"` - // The instance type to use for the nodes + // The instance type to use for the nodes. Type string `json:"type" yaml:"type" mapstructure:"type"` - // The size of the disk in GB + // The size of the disk in GB. VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"` - // VolumeType corresponds to the JSON schema field "volumeType". + // Volume type for the instance disk. Default is `gp2`. VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"` } @@ -1667,10 +1824,10 @@ const ( ) type SpecKubernetesNodePoolSize struct { - // The maximum number of nodes in the node pool + // The maximum number of nodes in the node pool. Max int `json:"max" yaml:"max" mapstructure:"max"` - // The minimum number of nodes in the node pool + // The minimum number of nodes in the node pool. Min int `json:"min" yaml:"min" mapstructure:"min"` } @@ -1709,6 +1866,10 @@ type SpecPluginsHelmReleases []struct { // The chart of the release Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. + DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + // The name of the release Name string `json:"name" yaml:"name" mapstructure:"name"` @@ -1759,24 +1920,26 @@ type SpecToolsConfigurationTerraform struct { State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"` } +// Configuration for storing the Terraform state of the cluster. type SpecToolsConfigurationTerraformState struct { // S3 corresponds to the JSON schema field "s3". 
S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"` } +// Configuration for the S3 bucket used to store the Terraform state. type SpecToolsConfigurationTerraformStateS3 struct { - // This value defines which bucket will be used to store all the states + // This value defines which bucket will be used to store all the states. BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` // This value defines which folder will be used to store all the states inside the - // bucket + // bucket. KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"` - // This value defines in which region the bucket is located + // This value defines in which region the bucket is located. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` // This value defines if the region of the bucket should be validated or not by - // Terraform, useful when using a bucket in a recently added region + // Terraform, useful when using a bucket in a recently added region. SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"` } @@ -1810,304 +1973,821 @@ const ( TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2" TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" + TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" + TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" + TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" + TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" + TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" + TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" + TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" + TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" + TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" + TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" ) -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) - } - *j = SpecKubernetesNodePoolInstanceVolumeType(v) - return nil -} +type TypesAwsS3BucketName string -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) - } - *j = SpecDistributionModulesPolicyType(v) - return nil -} +type TypesAwsS3BucketNamePrefix string -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ - "none", - "gatekeeper", - "kyverno", -} +type TypesAwsS3KeyPrefix string -var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ - "minio", - "externalEndpoint", -} +type TypesAwsSshPubKey string -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) - } - *j = SpecDistributionModulesTracingTempoBackend(v) - return nil -} +type TypesAwsSubnetId string -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") - } - if v, ok := raw["validationFailureAction"]; !ok || v == nil { - return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") - } - type Plain SpecDistributionModulesPolicyKyverno - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesPolicyKyverno(plain) - return nil -} +type TypesAwsTags map[string]string -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) - } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) - return nil -} +type TypesAwsVpcId string -var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ - "Audit", - "Enforce", -} +type TypesCidr string -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") - } - type Plain SpecDistributionModulesPolicyGatekeeper - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesPolicyGatekeeper(plain) - return nil -} +type TypesEnvRef string -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) - } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) - return nil +type TypesFileRef string + +type TypesFuryModuleComponentOverrides struct { + // Set to override the node selector used to place the pods of the package. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the package. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -var enumValues_SpecDistributionModulesTracingType = []interface{}{ - "none", - "tempo", +type TypesFuryModuleComponentOverridesWithIAMRoleName struct { + // IamRoleName corresponds to the JSON schema field "iamRoleName". + IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` + + // The node selector to use to place the pods for the load balancer controller + // module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cluster autoscaler + // module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) - } - *j = SpecDistributionModulesTracingType(v) - return nil +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". + NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Tolerations corresponds to the JSON schema field "tolerations". + Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", +// Override the common configuration with a particular configuration for the +// module. +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // Set to override the node selector used to place the pods of the module. 
+ NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") - } - type Plain SpecDistributionModulesMonitoring - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesMonitoring(plain) - return nil +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication even if + // `.spec.modules.auth.provider.type` is SSO or Basic Auth. + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // Use this host for the ingress instead of the default one. + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // Use this ingress class for the ingress instead of the default one. + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + +type TypesIpAddress string + +type TypesKubeLabels map[string]string + +type TypesKubeLabels_1 map[string]string + +type TypesKubeNodeSelector map[string]string + +type TypesKubeNodeSelector_1 map[string]string + +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +} + +type TypesKubeResourcesLimits struct { + // The CPU limit for the Pod. Example: `1000m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the Pod. Example: `1G`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResourcesRequests struct { + // The CPU request for the Pod, in cores. Example: `500m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for the Pod. Example: `500M`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeTaints []string + +type TypesKubeToleration struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // The key of the toleration + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". 
+ Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // The value of the toleration + Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` +} + +type TypesKubeTolerationEffect string + +const ( + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" +) + +type TypesKubeTolerationEffect_1 string + +const ( + TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" + TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" + TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" +) + +type TypesKubeTolerationOperator string + +const ( + TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" + TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" +) + +type TypesKubeTolerationOperator_1 string + +const ( + TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" + TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" +) + +type TypesKubeToleration_1 struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // Value corresponds to the JSON schema field "value". 
+ Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +type TypesSemVer string + +type TypesSshPubKey string + +type TypesTcpPort int + +type TypesUri string + +var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ + "EKSCluster", +} + +var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", +} + +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "eks", +} + +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "dns01", + "http01", +} + +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", +} + +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", +} + +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} + +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) + return nil +} + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ + "ingress", + "egress", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["cidrBlocks"]; !ok || v == nil { + return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["self"]; !ok || v == nil { + return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) + return nil +} + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ + "ingress", + "egress", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) + return nil +} + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ + "ingress", + "egress", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["from"]; !ok || v == nil { + return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + } + if v, ok := raw["to"]; !ok || v == nil { + return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) + } + *j = SpecKubernetesLogsTypesElem(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { + return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) + return nil +} + +var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + if len(plain.CidrBlocks) > 1 { + return fmt.Errorf("field %s length: must be <= %d", "cidrBlocks", 1) + } + if plain.Self != nil && len(plain.Self) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "self", 1) + } + if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesLogRetentionDays) UnmarshalJSON(b []byte) error { + var v int + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesLogRetentionDays { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogRetentionDays, v) } - *j = SpecDistributionModulesMonitoringType(v) + *j = SpecKubernetesLogRetentionDays(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + if v, ok := raw["id"]; !ok || v == nil { + return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required") } - type Plain SpecDistributionModulesTracing + if v, ok := raw["owner"]; !ok || v == nil { + return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required") + } + type Plain SpecKubernetesNodePoolAmi var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesTracing(plain) + *j = SpecKubernetesNodePoolAmi(plain) return nil } -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", +var enumValues_SpecKubernetesLogRetentionDays = []interface{}{ + 0, + 1, + 3, + 5, + 7, + 14, + 30, + 60, + 90, + 120, + 150, + 180, + 365, + 400, + 545, + 731, + 1096, + 1827, + 2192, + 2557, + 2922, + 3288, + 3653, +} + +var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ + "docker", + "containerd", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) + } + *j = SpecKubernetesNodePoolContainerRuntime(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") + if v, ok := raw["userarn"]; !ok || v == nil { + return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") + type Plain SpecKubernetesAwsAuthUser + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - type Plain SpecDistributionModules + *j = SpecKubernetesAwsAuthUser(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") + } + if v, ok := raw["rolearn"]; !ok || v == nil { + return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + } + type Plain SpecKubernetesAwsAuthRole var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModules(plain) + *j = SpecKubernetesAwsAuthRole(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["privateAccess"]; !ok || v == nil { + return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") + } + if v, ok := raw["publicAccess"]; !ok || v == nil { + return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + } + type Plain SpecKubernetesAPIServer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesAPIServer(plain) + return nil +} + +var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ + "gp2", + "gp3", + "io1", + "standard", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) + } + *j = SpecKubernetesNodePoolInstanceVolumeType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ssh"]; !ok || v == nil { + return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") + } + if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { + return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + } + type Plain SpecInfrastructureVpn + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpn(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { + return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") + } + if v, ok := raw["githubUsersName"]; !ok || v == nil { + return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") + } + type Plain SpecInfrastructureVpnSsh + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + } + *j = SpecInfrastructureVpnSsh(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["network"]; !ok || v == nil { + return fmt.Errorf("field network in SpecInfrastructureVpc: required") + } + type Plain SpecInfrastructureVpc + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpc(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["cidr"]; !ok || v == nil { + return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") + } + if v, ok := raw["subnetsCidrs"]; !ok || v == nil { + return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + } + type Plain SpecInfrastructureVpcNetwork + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpcNetwork(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + } + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + } + type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") + } + type Plain SpecKubernetesNodePoolInstance + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = SpecKubernetesNodePoolInstance(plain) return nil } @@ -2129,1023 +2809,1042 @@ func (j *SpecDistribution) UnmarshalJSON(b []byte) error { return nil } -type TypesCidr string - -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["private"]; !ok || v == nil { - return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") } - if v, ok := raw["public"]; !ok || v == nil { - return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") } - type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") + } + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") + } + type Plain SpecDistributionModules var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) + *j = SpecDistributionModules(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + if v, ok := raw["max"]; !ok || v == nil { + return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") } - type Plain SpecDistributionModulesLogging + if v, ok := raw["min"]; !ok || v == nil { + return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") + } + type Plain SpecKubernetesNodePoolSize var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLogging(plain) + *j = SpecKubernetesNodePoolSize(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
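Similarly, a minimal standalone sketch of the required-fields pattern used by unmarshalers such as `SpecKubernetesNodePoolSize` and `SpecKubernetesNodePoolInstance` above: decode into a raw map first, check that the mandatory keys are present, then decode into a `Plain` alias so the custom method does not recurse. The `NodePoolSize` type and `main` function below are illustrative assumptions, not the package's own types.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// NodePoolSize is an illustrative stand-in for SpecKubernetesNodePoolSize.
type NodePoolSize struct {
	Min int `json:"min"`
	Max int `json:"max"`
}

func (s *NodePoolSize) UnmarshalJSON(b []byte) error {
	var raw map[string]interface{}
	if err := json.Unmarshal(b, &raw); err != nil {
		return err
	}
	// Reject payloads that omit a required key, as the generated code does.
	for _, field := range []string{"min", "max"} {
		if v, ok := raw[field]; !ok || v == nil {
			return fmt.Errorf("field %s in NodePoolSize: required", field)
		}
	}
	// The Plain alias drops the UnmarshalJSON method, so this call does not recurse.
	type Plain NodePoolSize
	var plain Plain
	if err := json.Unmarshal(b, &plain); err != nil {
		return err
	}
	*s = NodePoolSize(plain)
	return nil
}

func main() {
	var size NodePoolSize
	fmt.Println(json.Unmarshal([]byte(`{"min": 1, "max": 3}`), &size), size) // <nil> {1 3}
	fmt.Println(json.Unmarshal([]byte(`{"min": 1}`), &size))                 // field max ... required
}
```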
-func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidr"]; !ok || v == nil { - return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") - } - if v, ok := raw["subnetsCidrs"]; !ok || v == nil { - return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") } - type Plain SpecInfrastructureVpcNetwork + type Plain SpecDistributionModulesTracing var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpcNetwork(plain) + *j = SpecDistributionModulesTracing(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { + for _, expected := range enumValues_SpecDistributionModulesTracingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecDistributionModulesTracingType(v) return nil } +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", +} + +var enumValues_SpecKubernetesNodePoolType = []interface{}{ + "eks-managed", + "self-managed", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["network"]; !ok || v == nil { - return fmt.Errorf("field network in SpecInfrastructureVpc: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecInfrastructureVpc - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) } - *j = SpecInfrastructureVpc(plain) + *j = SpecKubernetesNodePoolType(v) return nil } -type TypesAwsS3BucketNamePrefix string - -type TypesTcpPort int +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + } + *j = SpecDistributionModulesTracingTempoBackend(v) + return nil +} -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { - return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") - } - if v, ok := raw["githubUsersName"]; !ok || v == nil { - return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") } - type Plain SpecInfrastructureVpnSsh + type Plain SpecDistributionModulesPolicy var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) - } - *j = SpecInfrastructureVpnSsh(plain) + *j = SpecDistributionModulesPolicy(plain) return nil } -type TypesAwsVpcId string - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + if v, ok := raw["instance"]; !ok || v == nil { + return fmt.Errorf("field instance in SpecKubernetesNodePool: required") } - type Plain SpecDistributionModulesLoggingOpensearch + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePool: required") + } + if v, ok := raw["size"]; !ok || v == nil { + return fmt.Errorf("field size in SpecKubernetesNodePool: required") + } + type Plain SpecKubernetesNodePool var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingOpensearch(plain) + *j = SpecKubernetesNodePool(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + } + *j = SpecDistributionModulesPolicyType(v) + return nil +} + +var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ + "launch_configurations", + "launch_templates", + "both", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) + } + *j = SpecKubernetesNodePoolsLaunchKind(v) + return nil +} + +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ssh"]; !ok || v == nil { - return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") } - if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { - return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") } - type Plain SpecInfrastructureVpn + type Plain SpecDistributionModulesPolicyKyverno var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpn(plain) + *j = SpecDistributionModulesPolicyKyverno(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) } - *j = SpecDistributionModulesLoggingOpensearchType(v) + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) return nil } -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["privateAccess"]; !ok || v == nil { - return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") + if v, ok := raw["apiServer"]; !ok || v == nil { + return fmt.Errorf("field apiServer in SpecKubernetes: required") } - if v, ok := raw["publicAccess"]; !ok || v == nil { - return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { + return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") + } + if v, ok := raw["nodePools"]; !ok || v == nil { + return fmt.Errorf("field nodePools in SpecKubernetes: required") + } + if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { + return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") } - type Plain SpecKubernetesAPIServer + type Plain SpecKubernetes var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAPIServer(plain) + *j = SpecKubernetes(plain) return nil } -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` -} - -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["rolearn"]; !ok || v == nil { - return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") } - type Plain SpecKubernetesAwsAuthRole + type Plain SpecDistributionModulesPolicyGatekeeper var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthRole(plain) + *j = SpecDistributionModulesPolicyGatekeeper(plain) return nil } -type TypesKubeResourcesLimits struct { - // The cpu limit for the opensearch pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { +func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") - } - if v, ok := raw["userarn"]; !ok || v == nil { - return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") } - type Plain SpecKubernetesAwsAuthUser + type Plain SpecPluginsHelmReleasesElemSetElem var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthUser(plain) + *j = SpecPluginsHelmReleasesElemSetElem(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) } - *j = SpecDistributionModulesLoggingLokiBackend(v) + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) return nil } -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") } - type Plain SpecDistributionModulesLoggingCustomOutputs + type Plain SpecDistributionModulesMonitoring var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) + *j = SpecDistributionModulesMonitoring(plain) return nil } -var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ - "api", - "audit", - "authenticator", - "controllerManager", - "scheduler", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) } - *j = SpecKubernetesLogsTypesElem(v) + *j = SpecDistributionModulesMonitoringType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["dns"]; !ok || v == nil { - return fmt.Errorf("field dns in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = SpecDistributionModulesIngress(plain) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") } - type Plain SpecDistributionModulesIngressNginx + type Plain SpecDistributionModulesLogging var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginx(plain) + *j = SpecDistributionModulesLogging(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + for _, expected := range enumValues_SpecDistributionModulesLoggingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = SpecDistributionModulesIngressNginxType(v) + *j = SpecDistributionModulesLoggingType(v) return nil } -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") } - type Plain SpecDistributionModulesIngressNginxTLS + if v, ok := raw["keyPrefix"]; !ok || v == nil { + return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") + } + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") + } + type Plain SpecToolsConfigurationTerraformStateS3 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLS(plain) + *j = SpecToolsConfigurationTerraformStateS3(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - type Plain SpecDistributionModulesIngressNginxTLSSecret + type Plain SpecDistributionModulesLoggingOpensearch var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["from"]; !ok || v == nil { - return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") - } - if v, ok := raw["to"]; !ok || v == nil { - return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + if v, ok := raw["s3"]; !ok || v == nil { + return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts + type Plain SpecToolsConfigurationTerraformState var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) + *j = SpecToolsConfigurationTerraformState(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) + *j = SpecDistributionModulesLoggingOpensearchType(v) return nil } -type TypesAwsTags map[string]string - -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", -} - -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ - "ingress", - "egress", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["state"]; !ok || v == nil { + return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") + } + type Plain SpecToolsConfigurationTerraform + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecToolsConfigurationTerraform(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) + *j = SpecDistributionModulesLoggingLokiBackend(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressDNS) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["private"]; !ok || v == nil { - return fmt.Errorf("field private in SpecDistributionModulesIngressDNS: required") - } - if v, ok := raw["public"]; !ok || v == nil { - return fmt.Errorf("field public in SpecDistributionModulesIngressDNS: required") + if v, ok := raw["terraform"]; !ok || v == nil { + return fmt.Errorf("field terraform in SpecToolsConfiguration: required") } - type Plain SpecDistributionModulesIngressDNS + type Plain SpecToolsConfiguration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNS(plain) + *j = SpecToolsConfiguration(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") } - type Plain SpecDistributionModulesIngressDNSPublic + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPublic(plain) + *j = SpecDistributionModulesLoggingCustomOutputs(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { +func (j *Spec) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") + if v, ok := raw["distribution"]; !ok || v == nil { + return fmt.Errorf("field distribution in Spec: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") + if v, ok := raw["distributionVersion"]; !ok || v == nil { + return fmt.Errorf("field distributionVersion in Spec: required") } - type Plain SpecDistributionModulesIngressDNSPrivate + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in Spec: required") + } + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in Spec: required") + } + if v, ok := raw["toolsConfiguration"]; !ok || v == nil { + return fmt.Errorf("field toolsConfiguration in Spec: required") + } + type Plain Spec var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPrivate(plain) + if len(plain.DistributionVersion) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + } + *j = Spec(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidrBlocks"]; !ok || v == nil { - return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["dns"]; !ok || v == nil { + return fmt.Errorf("field dns in SpecDistributionModulesIngress: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock + type Plain SpecDistributionModulesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) + *j = SpecDistributionModulesIngress(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") } - type Plain SpecDistributionModulesIngressCertManager + type Plain SpecDistributionModulesIngressNginx var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManager(plain) + *j = SpecDistributionModulesIngressNginx(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ - "ingress", - "egress", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) + *j = SpecDistributionModulesIngressNginxType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + type Plain SpecDistributionModulesIngressNginxTLS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + *j = SpecDistributionModulesIngressNginxTLS(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = SpecDistributionModulesIngressNginxTLSProvider(v) return nil } -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "dns01", - "http01", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["self"]; !ok || v == nil { - return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecDistributionModulesIngressDNS: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecDistributionModulesIngressDNS: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf + type Plain SpecDistributionModulesIngressDNS var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) + *j = SpecDistributionModulesIngressDNS(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") } - type Plain SpecDistributionModulesDr + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + } + type Plain SpecDistributionModulesIngressDNSPublic var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDr(plain) + *j = SpecDistributionModulesIngressDNSPublic(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ - "ingress", - "egress", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) + type Plain SpecDistributionModulesIngressDNSPrivate + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressDNSPrivate(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["eks"]; !ok || v == nil { - return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") } - type Plain SpecDistributionModulesDrVelero + type Plain SpecDistributionModulesIngressCertManager var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVelero(plain) + *j = SpecDistributionModulesIngressCertManager(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - type Plain SpecDistributionModulesDrVeleroEks + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVeleroEks(plain) + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) return nil } -const TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + return nil +} // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { + return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { + return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { - return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { + return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { + return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId + type Plain SpecDistributionModulesAuthPomeriumSecrets var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) + *j = SpecDistributionModulesAuthPomeriumSecrets(plain) return nil } -const TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - type Plain SpecKubernetesNodePoolAdditionalFirewallRules + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") + } + type Plain SpecDistributionModulesDr var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - if plain.Self != nil && len(plain.Self) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "self", 1) - } - if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + *j = SpecDistributionModulesDr(plain) return nil } -const TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["id"]; !ok || v == nil { - return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required") - } - if v, ok := raw["owner"]; !ok || v == nil { - return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required") + if v, ok := raw["eks"]; !ok || v == nil { + return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") } - type Plain SpecKubernetesNodePoolAmi + type Plain SpecDistributionModulesDrVelero var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAmi(plain) + *j = SpecDistributionModulesDrVelero(plain) return nil } -const TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" - -var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ - "docker", - "containerd", +var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { + for _, expected := range enumValues_TypesKubeTolerationEffect_1 { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) } - *j = SpecKubernetesNodePoolContainerRuntime(v) + *j = TypesKubeTolerationEffect_1(v) return nil } -const ( - TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" - TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" - TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" -) - -var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ - "gp2", - "gp3", - "io1", - "standard", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") } - type Plain SpecDistributionModulesPolicy + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + } + type Plain SpecDistributionModulesDrVeleroEks var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicy(plain) + *j = SpecDistributionModulesDrVeleroEks(plain) return nil } -const ( - TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" - TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" - TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" -) - // UnmarshalJSON implements json.Unmarshaler. func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { var v string @@ -3198,53 +3897,6 @@ var enumValues_TypesAwsRegion = []interface{}{ "us-west-2", } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") - } - type Plain SpecKubernetesNodePoolInstance - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesNodePoolInstance(plain) - return nil -} - -type TypesAwsS3BucketName string - -type TypesKubeLabels_1 map[string]string - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["max"]; !ok || v == nil { - return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") - } - if v, ok := raw["min"]; !ok || v == nil { - return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") - } - type Plain SpecKubernetesNodePoolSize - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesNodePoolSize(plain) - return nil -} - -type TypesAwsSubnetId string - -type TypesKubeTaints []string - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { var v string @@ -3265,123 +3917,31 @@ func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecKubernetesNodePoolType = []interface{}{ - "eks-managed", - "self-managed", -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) - } - *j = SpecKubernetesNodePoolType(v) - return nil -} - -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "eks", -} - -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the dr module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["instance"]; !ok || v == nil { - return fmt.Errorf("field instance in SpecKubernetesNodePool: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePool: required") - } - if v, ok := raw["size"]; !ok || v == nil { - return fmt.Errorf("field size in SpecKubernetesNodePool: required") - } - type Plain SpecKubernetesNodePool - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesNodePool(plain) - return nil -} - -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - - // The host of the ingress - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` -} - -var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ - "launch_configurations", - "launch_templates", - "both", +var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ + "Exists", + "Equal", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { + for _, expected := range enumValues_TypesKubeTolerationOperator_1 { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) } - *j = SpecKubernetesNodePoolsLaunchKind(v) + *j = TypesKubeTolerationOperator_1(v) return nil } -type TypesFuryModuleComponentOverridesWithIAMRoleName struct { - // IamRoleName corresponds to the JSON schema field "iamRoleName". - IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` - - // The node selector to use to place the pods for the load balancer controller - // module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cluster autoscaler - // module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -3439,56 +3999,26 @@ func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["apiServer"]; !ok || v == nil { - return fmt.Errorf("field apiServer in SpecKubernetes: required") - } - if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { - return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") - } - if v, ok := raw["nodePools"]; !ok || v == nil { - return fmt.Errorf("field nodePools in SpecKubernetes: required") - } - if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { - return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") - } - type Plain SpecKubernetes - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetes(plain) - return nil -} - -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration_1: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration_1: required") } if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") + return fmt.Errorf("field value in TypesKubeToleration_1: required") } - type Plain SpecPluginsHelmReleasesElemSetElem + type Plain TypesKubeToleration_1 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecPluginsHelmReleasesElemSetElem(plain) + *j = TypesKubeToleration_1(plain) return nil } @@ -3552,491 +4082,192 @@ func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { return nil } -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") - } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) - return nil -} - -type TypesAwsS3KeyPrefix string - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") - } - if v, ok := raw["keyPrefix"]; !ok || v == nil { - return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") - } - type Plain SpecToolsConfigurationTerraformStateS3 - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecToolsConfigurationTerraformStateS3(plain) - return nil -} - -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["s3"]; !ok || v == nil { - return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") - } - type Plain SpecToolsConfigurationTerraformState - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecToolsConfigurationTerraformState(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") - } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["state"]; !ok || v == nil { - return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") - } - type Plain SpecToolsConfigurationTerraform - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecToolsConfigurationTerraform(plain) - return nil -} - -type TypesKubeLabels map[string]string - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["terraform"]; !ok || v == nil { - return fmt.Errorf("field terraform in SpecToolsConfiguration: required") - } - type Plain SpecToolsConfiguration - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecToolsConfiguration(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) - return nil -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *Spec) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["distribution"]; !ok || v == nil { - return fmt.Errorf("field distribution in Spec: required") - } - if v, ok := raw["distributionVersion"]; !ok || v == nil { - return fmt.Errorf("field distributionVersion in Spec: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in Spec: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in Spec: required") - } - if v, ok := raw["toolsConfiguration"]; !ok || v == nil { - return fmt.Errorf("field toolsConfiguration in Spec: required") + if v, ok := raw["secrets"]; !ok || v == nil { + return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") } - type Plain Spec + type Plain SpecDistributionModulesAuthPomerium_2 var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if len(plain.DistributionVersion) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = Spec(plain) + *j = SpecDistributionModulesAuthPomerium_2(plain) return nil } -var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") } - type Plain TypesKubeToleration + type Plain SpecDistributionCustomPatchesSecretGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) return nil } -type TypesKubeToleration struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` - - // The key of the toleration - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". 
- Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // The value of the toleration - Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` -} - -const ( - TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" - TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" -) - // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator { + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) } - *j = TypesKubeTolerationOperator(v) + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) return nil } -var enumValues_TypesKubeTolerationOperator = []interface{}{ - "Exists", - "Equal", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + return nil } -type TypesKubeTolerationOperator string - -const ( - TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" -) - // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) } - *j = TypesKubeTolerationEffect(v) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { - return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { - return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { - return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration: required") } - if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { - return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration: required") } - type Plain SpecDistributionModulesAuthPomeriumSecrets + type Plain TypesKubeToleration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomeriumSecrets(plain) + *j = TypesKubeToleration(plain) return nil } -type TypesKubeNodeSelector_1 map[string]string - -type TypesKubeTolerationEffect_1 string - -var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect_1 { + for _, expected := range enumValues_TypesKubeTolerationOperator { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) } - *j = TypesKubeTolerationEffect_1(v) + *j = TypesKubeTolerationOperator(v) return nil } -const ( - TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" - TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" - TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" -) - -type TypesKubeTolerationOperator_1 string - -var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ +var enumValues_TypesKubeTolerationOperator = []interface{}{ "Exists", "Equal", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator_1 { + for _, expected := range enumValues_TypesKubeTolerationEffect { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) - } - *j = TypesKubeTolerationOperator_1(v) - return nil -} - -const ( - TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" - TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" -) - -type TypesKubeToleration_1 struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` - - // Key corresponds to the JSON schema field "key". - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". - Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // Value corresponds to the JSON schema field "value". - Value string `json:"value" yaml:"value" mapstructure:"value"` -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration_1: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration_1: required") - } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in TypesKubeToleration_1: required") - } - type Plain TypesKubeToleration_1 - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) } - *j = TypesKubeToleration_1(plain) + *j = TypesKubeTolerationEffect(v) return nil } -type TypesFuryModuleComponentOverrides_1 struct { - // NodeSelector corresponds to the JSON schema field "nodeSelector". - NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // Tolerations corresponds to the JSON schema field "tolerations". - Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - var enumValues_TypesKubeTolerationEffect = []interface{}{ "NoSchedule", "PreferNoSchedule", "NoExecute", } -type TypesKubeTolerationEffect string - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["secrets"]; !ok || v == nil { - return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") + var ok bool + for _, expected := range enumValues_EksclusterKfdV1Alpha2Kind { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesAuthPomerium_2 - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_EksclusterKfdV1Alpha2Kind, v) } - *j = SpecDistributionModulesAuthPomerium_2(plain) + *j = EksclusterKfdV1Alpha2Kind(v) return nil } -type TypesAwsSshPubKey string - -type TypesEnvRef string - -type TypesFileRef string - -type TypesIpAddress string - -type TypesSemVer string - -type TypesSshPubKey string - -type TypesUri string - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -4055,32 +4286,6 @@ func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { return nil } -var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ - "EKSCluster", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_EksclusterKfdV1Alpha2Kind { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_EksclusterKfdV1Alpha2Kind, v) - } - *j = EksclusterKfdV1Alpha2Kind(v) - return nil -} - -type TypesKubeNodeSelector map[string]string - // UnmarshalJSON implements json.Unmarshaler. func (j *Metadata) UnmarshalJSON(b []byte) error { var raw map[string]interface{} diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go index 9a4f9ca9e..2a6f2ce18 100644 --- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go +++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go @@ -8,6 +8,7 @@ import ( "reflect" ) +// KFD modules deployed on top of an existing Kubernetes cluster. type KfddistributionKfdV1Alpha2 struct { // ApiVersion corresponds to the JSON schema field "apiVersion". ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` @@ -27,7 +28,8 @@ type KfddistributionKfdV1Alpha2Kind string const KfddistributionKfdV1Alpha2KindKFDDistribution KfddistributionKfdV1Alpha2Kind = "KFDDistribution" type Metadata struct { - // Name corresponds to the JSON schema field "name". + // The name of the cluster. It will also be used as a prefix for all the other + // resources created. Name string `json:"name" yaml:"name" mapstructure:"name"` } @@ -35,7 +37,9 @@ type Spec struct { // Distribution corresponds to the JSON schema field "distribution". Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` - // DistributionVersion corresponds to the JSON schema field "distributionVersion". + // Defines which KFD version will be installed and, in consequence, the Kubernetes + // version used to create the cluster. 
It supports git tags and branches. Example: + // `v1.30.1`. DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` // Plugins corresponds to the JSON schema field "plugins". @@ -49,36 +53,45 @@ type SpecDistribution struct { // CustomPatches corresponds to the JSON schema field "customPatches". CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"` - // The kubeconfig file path + // The path to the kubeconfig file. Kubeconfig string `json:"kubeconfig" yaml:"kubeconfig" mapstructure:"kubeconfig"` // Modules corresponds to the JSON schema field "modules". Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` } +// Common configuration for all the distribution modules. type SpecDistributionCommon struct { - // The node selector to use to place the pods for all the KFD modules + // The node selector to use to place the pods for all the KFD modules. Follows + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` // URL of the registry where to pull images from for the Distribution phase. - // (Default is registry.sighup.io/fury). + // (Default is `registry.sighup.io/fury`). // // NOTE: If plugins are pulling from the default registry, the registry will be // replaced for the plugin too. Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - // The relative path to the vendor directory, does not need to be changed + // The relative path to the vendor directory, does not need to be changed. RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` - // The tolerations that will be added to the pods for all the KFD modules + // An array with the tolerations that will be added to the pods for all the KFD + // modules. Follows Kubernetes tolerations format. Example: + // + // ```yaml + // - effect: NoSchedule + // key: node.kubernetes.io/role + // value: infra + // ``` Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionCommonProvider struct { - // The type of the provider + // The provider type. Don't set. FOR INTERNAL USE ONLY. Type string `json:"type" yaml:"type" mapstructure:"type"` } @@ -277,8 +290,11 @@ type SpecDistributionModules struct { Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` } +// Configuration for the Auth module. type SpecDistributionModulesAuth struct { - // The base domain for the auth module + // Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, + // Dex). Notice that when nginx type is dual, these will use the `external` + // ingress class. BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` // Dex corresponds to the JSON schema field "dex". 
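Putting the fields documented above together, a minimal `furyctl.yaml` fragment for the KFDDistribution kind could look like the following sketch; the cluster name, version, and kubeconfig path are placeholders.

```yaml
metadata:
  name: example-cluster                  # also used as a prefix for created resources
spec:
  distributionVersion: v1.30.1           # a KFD git tag or branch
  distribution:
    kubeconfig: ./kubeconfig             # path to the kubeconfig of the target cluster
    common:
      registry: registry.sighup.io/fury  # default registry, shown here explicitly
      nodeSelector:
        node.kubernetes.io/role: infra
```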
@@ -294,11 +310,25 @@ type SpecDistributionModulesAuth struct {
 	Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
 }
 
+// Configuration for the Dex package.
 type SpecDistributionModulesAuthDex struct {
-	// The additional static clients for dex
+	// Additional static client definitions that will be added to the default clients
+	// included with the distribution in Dex's configuration. Example:
+	//
+	// ```yaml
+	// additionalStaticClients:
+	//   - id: my-custom-client
+	//     name: "A custom additional static client"
+	//     redirectURIs:
+	//       - "https://myapp.tld/redirect"
+	//       - "https://alias.tld/oidc-callback"
+	//     secret: supersecretpassword
+	// ```
+	// Reference: https://dexidp.io/docs/connectors/local/
 	AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"`
 
-	// The connectors for dex
+	// A list with each item defining a Dex connector. Follows Dex connectors
+	// configuration format: https://dexidp.io/docs/connectors/
 	Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"`
 
 	// Expiry corresponds to the JSON schema field "expiry".
@@ -316,22 +346,25 @@ type SpecDistributionModulesAuthDexExpiry struct {
 	SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"`
 }
 
+// Override the common configuration with a particular configuration for the Auth
+// module.
 type SpecDistributionModulesAuthOverrides struct {
 	// Ingresses corresponds to the JSON schema field "ingresses".
 	Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
 
-	// The node selector to use to place the pods for the auth module
+	// Set to override the node selector used to place the pods of the Auth module.
 	NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
 
-	// The tolerations that will be added to the pods for the auth module
+	// Set to override the tolerations that will be added to the pods of the Auth
+	// module.
 	Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
 }
 
 type SpecDistributionModulesAuthOverridesIngress struct {
-	// The host of the ingress
+	// Use this host for the ingress instead of the default one.
 	Host string `json:"host" yaml:"host" mapstructure:"host"`
 
-	// The ingress class of the ingress
+	// Use this ingress class for the ingress instead of the default one.
 	IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"`
 }
 
@@ -459,15 +492,23 @@ type SpecDistributionModulesAuthProvider struct {
 	// BasicAuth corresponds to the JSON schema field "basicAuth".
 	BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"`
 
-	// The type of the provider, must be ***none***, ***sso*** or ***basicAuth***
+	// The type of the Auth provider, options are:
+	// - `none`: will disable authentication in the infrastructural ingresses.
+	// - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO)
+	// and require authentication before accessing them.
+	// - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth
+	// (username and password) authentication.
+ // + // Default is `none`. Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the HTTP Basic Auth provider. type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth + // The password for logging in with the HTTP basic authentication. Password string `json:"password" yaml:"password" mapstructure:"password"` - // The username for the basic auth + // The username for logging in with the HTTP basic authentication. Username string `json:"username" yaml:"username" mapstructure:"username"` } @@ -479,11 +520,16 @@ const ( SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" ) +// Configuration for the Disaster Recovery module. type SpecDistributionModulesDr struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of the DR, must be ***none*** or ***on-premises*** + // The type of the Disaster Recovery, must be `none` or `on-premises`. `none` + // disables the module and `on-premises` will install Velero and an optional MinIO + // deployment. + // + // Default is `none`. Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` // Velero corresponds to the JSON schema field "velero". @@ -497,6 +543,7 @@ const ( SpecDistributionModulesDrTypeOnPremises SpecDistributionModulesDrType = "on-premises" ) +// Configuration for the Velero package. type SpecDistributionModulesDrVelero struct { // The storage backend type for Velero. `minio` will use an in-cluster MinIO // deployment for object storage, `externalEndpoint` can be used to point to an @@ -564,24 +611,31 @@ type SpecDistributionModulesDrVeleroSchedulesCron struct { } type SpecDistributionModulesIngress struct { - // the base domain used for all the KFD ingresses, if in the nginx dual - // configuration, it should be the same as the - // .spec.distribution.modules.ingress.dns.private.name zone + // The base domain used for all the KFD infrastructural ingresses. If using the + // nginx dual type, this value should be the same as the domain associated with + // the `internal` ingress class. BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` - // CertManager corresponds to the JSON schema field "certManager". + // Configuration for the cert-manager package. Required even if + // `ingress.nginx.type` is `none`, cert-manager is used for managing other + // certificates in the cluster besides the TLS termination certificates for the + // ingresses. CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"` // Forecastle corresponds to the JSON schema field "forecastle". Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` - // Configurations for the nginx ingress controller module + // Configurations for the Ingress nginx controller package. Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager package. 
Required even if
+// `ingress.nginx.type` is `none`, cert-manager is used for managing other
+// certificates in the cluster besides the TLS termination certificates for the
+// ingresses.
 type SpecDistributionModulesIngressCertManager struct {
 	// ClusterIssuer corresponds to the JSON schema field "clusterIssuer".
 	ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"`
@@ -590,17 +644,21 @@ type SpecDistributionModulesIngressCertManager struct {
 	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
 }
 
+// Configuration for the cert-manager's ACME clusterIssuer used to request
+// certificates from Let's Encrypt.
 type SpecDistributionModulesIngressCertManagerClusterIssuer struct {
-	// The email of the cluster issuer
+	// The email address to use during the certificate issuing process.
 	Email string `json:"email" yaml:"email" mapstructure:"email"`
 
-	// The name of the cluster issuer
+	// Name of the clusterIssuer.
 	Name string `json:"name" yaml:"name" mapstructure:"name"`
 
-	// The custom solvers configurations
+	// List of challenge solvers to use instead of the default one for the `http01`
+	// challenge.
 	Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"`
 
-	// The type of the cluster issuer, must be ***http01***
+	// The type of the clusterIssuer. Only `http01` challenge is supported for
+	// KFDDistribution kind. See solvers for arbitrary configurations.
 	Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
 }
 
@@ -620,14 +678,24 @@ type SpecDistributionModulesIngressNginx struct {
 	// Tls corresponds to the JSON schema field "tls".
 	Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"`
 
-	// The type of the nginx ingress controller, must be ***none***, ***single*** or
-	// ***dual***
+	// The type of the Ingress nginx controller, options are:
+	// - `none`: no ingress controller will be installed and no infrastructural
+	// ingresses will be created.
+	// - `single`: a single ingress controller with ingress class `nginx` will be
+	// installed to manage all the ingress resources, infrastructural ingresses will
+	// be created.
+	// - `dual`: two independent ingress controllers will be installed, one for the
+	// `internal` ingress class intended for private ingresses and one for the
+	// `external` ingress class intended for public ingresses. KFD infrastructural
+	// ingresses will use the `internal` ingress class when using the dual type.
+	//
+	// Default is `single`.
 	Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"`
 }
 
 type SpecDistributionModulesIngressNginxTLS struct {
-	// The provider of the TLS certificate, must be ***none***, ***certManager*** or
-	// ***secret***
+	// The provider of the TLS certificates for the ingresses, one of: `none`,
+	// `certManager`, or `secret`.
 	Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
 
 	// Secret corresponds to the JSON schema field "secret".
@@ -642,15 +710,18 @@ const (
 	SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret"
 )
 
+// Kubernetes TLS secret for the ingresses TLS certificate.
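As a sketch of how the Ingress options documented above fit together in `furyctl.yaml`; the base domain, issuer name, and email are placeholders:

```yaml
spec:
  distribution:
    modules:
      ingress:
        baseDomain: internal.example.dev   # placeholder domain
        nginx:
          type: dual                       # one of: none, single, dual
          tls:
            provider: certManager          # one of: none, certManager, secret
        certManager:
          clusterIssuer:
            name: letsencrypt-example      # placeholder issuer name
            email: admin@example.dev       # placeholder email
            type: http01                   # only the http01 challenge is supported
```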
 type SpecDistributionModulesIngressNginxTLSSecret struct {
-	// Ca corresponds to the JSON schema field "ca".
+	// The Certificate Authority certificate file's content. You can use the
+	// `"{file://}"` notation to get the content from a file.
 	Ca string `json:"ca" yaml:"ca" mapstructure:"ca"`
 
-	// The certificate file content or you can use the file notation to get the
-	// content from a file
+	// The certificate file's content. You can use the `"{file://}"` notation to
+	// get the content from a file.
 	Cert string `json:"cert" yaml:"cert" mapstructure:"cert"`
 
-	// Key corresponds to the JSON schema field "key".
+	// The signing key file's content. You can use the `"{file://}"` notation to
+	// get the content from a file.
 	Key string `json:"key" yaml:"key" mapstructure:"key"`
 }
 
@@ -662,14 +733,17 @@ const (
 	SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single"
 )
 
+// Override the common configuration with a particular configuration for the
+// Ingress module.
 type SpecDistributionModulesIngressOverrides struct {
 	// Ingresses corresponds to the JSON schema field "ingresses".
 	Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
 
-	// The node selector to use to place the pods for the ingress module
+	// Set to override the node selector used to place the pods of the Ingress module.
 	NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
 
-	// The tolerations that will be added to the pods for the ingress module
+	// Set to override the tolerations that will be added to the pods of the Ingress
+	// module.
 	Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
 }
 
@@ -678,6 +752,7 @@ type SpecDistributionModulesIngressOverridesIngresses struct {
 	Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`
 }
 
+// Configuration for the Logging module.
 type SpecDistributionModulesLogging struct {
 	// Cerebro corresponds to the JSON schema field "cerebro".
 	Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"`
@@ -700,79 +775,87 @@ type SpecDistributionModulesLogging struct {
 	// Overrides corresponds to the JSON schema field "overrides".
 	Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
 
-	// selects the logging stack. Choosing none will disable the centralized logging.
-	// Choosing opensearch will deploy and configure the Logging Operator and an
+	// Selects the logging stack. Options are:
+	// - `none`: will disable the centralized logging.
+	// - `opensearch`: will deploy and configure the Logging Operator and an
 	// OpenSearch cluster (can be single or triple for HA) where the logs will be
-	// stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh
-	// for storage. Choosing customOuput the Logging Operator will be deployed and
-	// installed but with no local storage, you will have to create the needed Outputs
-	// and ClusterOutputs to ship the logs to your desired storage.
+	// stored.
+	// - `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.
+	// - `customOutputs`: the Logging Operator will be deployed and installed but with
+	// no local storage, you will have to create the needed Outputs and ClusterOutputs
+	// to ship the logs to your desired storage.
+	//
+	// Default is `opensearch`.
 	Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"`
 }
 
+// DEPRECATED since KFD v1.26.6, v1.27.5, v1.28.0.
 type SpecDistributionModulesLoggingCerebro struct {
 	// Overrides corresponds to the JSON schema field "overrides".
 	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
 }
 
-// when using the customOutputs logging type, you need to manually specify the spec
-// of the several Output and ClusterOutputs that the Logging Operator expects to
-// forward the logs collected by the pre-defined flows.
+// When using the `customOutputs` logging type, you need to manually specify the
+// spec of the several `Output` and `ClusterOutputs` that the Logging Operator
+// expects to forward the logs collected by the pre-defined flows.
 type SpecDistributionModulesLoggingCustomOutputs struct {
-	// This value defines where the output from Flow will be sent. Will be the `spec`
-	// section of the `Output` object. It must be a string (and not a YAML object)
-	// following the OutputSpec definition. Use the nullout output to discard the
-	// flow.
+	// This value defines where the output from the `audit` Flow will be sent. This
+	// will be the `spec` section of the `Output` object. It must be a string (and not
+	// a YAML object) following the OutputSpec definition. Use the `nullout` output to
+	// discard the flow: `nullout: {}`
 	Audit string `json:"audit" yaml:"audit" mapstructure:"audit"`
 
-	// This value defines where the output from Flow will be sent. Will be the `spec`
-	// section of the `Output` object. It must be a string (and not a YAML object)
-	// following the OutputSpec definition. Use the nullout output to discard the
-	// flow.
+	// This value defines where the output from the `errors` Flow will be sent. This
+	// will be the `spec` section of the `Output` object. It must be a string (and not
+	// a YAML object) following the OutputSpec definition. Use the `nullout` output to
+	// discard the flow: `nullout: {}`
 	Errors string `json:"errors" yaml:"errors" mapstructure:"errors"`
 
-	// This value defines where the output from Flow will be sent. Will be the `spec`
-	// section of the `Output` object. It must be a string (and not a YAML object)
-	// following the OutputSpec definition. Use the nullout output to discard the
-	// flow.
+	// This value defines where the output from the `events` Flow will be sent. This
+	// will be the `spec` section of the `Output` object. It must be a string (and not
+	// a YAML object) following the OutputSpec definition. Use the `nullout` output to
+	// discard the flow: `nullout: {}`
 	Events string `json:"events" yaml:"events" mapstructure:"events"`
 
-	// This value defines where the output from Flow will be sent. Will be the `spec`
-	// section of the `Output` object. It must be a string (and not a YAML object)
-	// following the OutputSpec definition. Use the nullout output to discard the
-	// flow.
+	// This value defines where the output from the `infra` Flow will be sent. This
+	// will be the `spec` section of the `Output` object. It must be a string (and not
+	// a YAML object) following the OutputSpec definition.
Use the `nullout` output to + // discard the flow: `nullout: {}` Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdEtcd` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` } +// Configuration for the Loki package. type SpecDistributionModulesLoggingLoki struct { - // Backend corresponds to the JSON schema field "backend". + // The storage backend type for Loki. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Loki's external storage backend. 
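As an illustration of the `customOutputs` fields documented above, the following `furyctl.yaml` sketch discards every predefined flow except `kubernetes`, which is shipped to a hypothetical destination; the destination spec is only an assumption, any valid Logging Operator OutputSpec string works.

```yaml
spec:
  distribution:
    modules:
      logging:
        type: customOutputs
        customOutputs:
          # Each value is the `spec` of an Output object, passed as a string.
          audit: "nullout: {}"
          errors: "nullout: {}"
          events: "nullout: {}"
          infra: "nullout: {}"
          ingressNginx: "nullout: {}"
          kubernetes: |
            loki:
              url: http://logs.example.dev:3100   # hypothetical destination
          systemdCommon: "nullout: {}"
          systemdEtcd: "nullout: {}"
```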
ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Resources corresponds to the JSON schema field "resources". @@ -786,23 +869,25 @@ const ( SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" ) +// Configuration for Loki's external storage backend. type SpecDistributionModulesLoggingLokiExternalEndpoint struct { - // The access key id of the loki external endpoint + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the loki external endpoint + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the loki external endpoint + // External S3-compatible endpoint for Loki's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the loki external endpoint will be insecure + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the loki external endpoint + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for Logging's MinIO deployment. type SpecDistributionModulesLoggingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -810,15 +895,15 @@ type SpecDistributionModulesLoggingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The PVC size for each minio disk, 6 disks total + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesLoggingMinioRootUser struct { - // The password of the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username of the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -829,10 +914,11 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the opensearch pods + // The storage size for the OpenSearch volumes. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` - // The type of the opensearch, must be ***single*** or ***triple*** + // The type of OpenSearch deployment. 
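A sketch of a Loki configuration using the external storage backend documented above; the endpoint, bucket name, and credentials are placeholders.

```yaml
spec:
  distribution:
    modules:
      logging:
        type: loki
        loki:
          backend: externalEndpoint        # use `minio` for the in-cluster deployment instead
          externalEndpoint:
            endpoint: s3.example.dev       # S3-compatible endpoint, placeholder
            insecure: false                # keep HTTPS
            bucketName: loki-logs          # placeholder bucket name
            accessKeyId: REPLACE_ME        # placeholder credentials
            secretAccessKey: REPLACE_ME
```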
One of: `single` for a single replica or + // `triple` for an HA 3-replicas deployment. Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` } @@ -843,6 +929,7 @@ const ( SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" ) +// Configuration for the Logging Operator. type SpecDistributionModulesLoggingOperator struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -853,682 +940,448 @@ type SpecDistributionModulesLoggingType string const ( SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki" - SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" - SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" ) -// configuration for the Monitoring module components -type SpecDistributionModulesMonitoring struct { - // Alertmanager corresponds to the JSON schema field "alertmanager". - Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` - - // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". - BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` - - // Grafana corresponds to the JSON schema field "grafana". - Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` - - // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". - KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` - - // Mimir corresponds to the JSON schema field "mimir". - Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` - - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Prometheus corresponds to the JSON schema field "prometheus". - Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` - - // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". - PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` - - // The type of the monitoring, must be ***none***, ***prometheus***, - // ***prometheusAgent*** or ***mimir***. - // - // - `none`: will disable the whole monitoring stack. 
- // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus - // instace, Alertmanager, a set of alert rules, exporters needed to monitor all - // the components of the cluster, Grafana and a series of dashboards to view the - // collected metrics, and more. - // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus - // in Agent mode (no alerting, no queries, no storage), and all the exporters - // needed to get metrics for the status of the cluster and the workloads. Useful - // when having a centralized (remote) Prometheus where to ship the metrics and not - // storing them locally in the cluster. - // - `mimir`: will install the same as the `prometheus` option, and in addition - // Grafana Mimir that allows for longer retention of metrics and the usage of - // Object Storage. - Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` - - // X509Exporter corresponds to the JSON schema field "x509Exporter". - X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"` -} - -type SpecDistributionModulesMonitoringAlertManager struct { - // The webhook url to send deadman switch monitoring, for example to use with - // healthchecks.io - DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` - - // If true, the default rules will be installed - InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` - - // The slack webhook url to send alerts - SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + } + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + return nil } -type SpecDistributionModulesMonitoringBlackboxExporter struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + return nil } -type SpecDistributionModulesMonitoringGrafana struct { - // Setting this to true will deploy an additional `grafana-basic-auth` ingress - // protected with Grafana's basic auth instead of SSO. It's intended use is as a - // temporary ingress for when there are problems with the SSO login flow. - // - // Notice that by default anonymous access is enabled. - BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's - // role. Example: - // - // ```yaml - // usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' || - // contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') && - // 'Viewer' - // ``` - // - // More details in [Grafana's - // documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping). - UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"` +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", } -type SpecDistributionModulesMonitoringKubeStateMetrics struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) + return nil } -type SpecDistributionModulesMonitoringMimir struct { - // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". 
- ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The retention time for the mimir pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + } + *j = SpecDistributionModulesIngressNginxTLSProvider(v) + return nil } -type SpecDistributionModulesMonitoringMimirBackend string - -const ( - SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" - SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" -) - -type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { - // The access key id of the external mimir backend - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the external mimir backend - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // The endpoint of the external mimir backend - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, the external mimir backend will not use tls - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key of the external mimir backend - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", } -type SpecDistributionModulesMonitoringMinio struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The storage size for the minio pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + } + type Plain SpecDistributionModulesIngressCertManager + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManager(plain) + return nil } -type SpecDistributionModulesMonitoringMinioRootUser struct { - // The password for the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheus struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` - - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - - // The retention size for the k8s Prometheus instance. - RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - - // The retention time for the K8s Prometheus instance. - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - - // The storage size for the k8s Prometheus instance. - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheusAgent struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` - - // Resources corresponds to the JSON schema field "resources". 
- Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} - -type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} - -type SpecDistributionModulesMonitoringType string - -const ( - SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" - SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" - SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" - SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" -) - -type SpecDistributionModulesMonitoringX509Exporter struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesNetworking struct { - // Cilium corresponds to the JSON schema field "cilium". - Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // TigeraOperator corresponds to the JSON schema field "tigeraOperator". - TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` - - // The type of networking to use, either ***none***, ***calico*** or ***cilium*** - Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesNetworkingCilium struct { - // MaskSize corresponds to the JSON schema field "maskSize". - MaskSize string `json:"maskSize" yaml:"maskSize" mapstructure:"maskSize"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // PodCidr corresponds to the JSON schema field "podCidr". - PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"` -} - -type SpecDistributionModulesNetworkingTigeraOperator struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesNetworkingType string - -const ( - SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico" - SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium" - SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" -) - -type SpecDistributionModulesPolicy struct { - // Gatekeeper corresponds to the JSON schema field "gatekeeper". - Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` - - // Kyverno corresponds to the JSON schema field "kyverno". 
- Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The type of security to use, either ***none***, ***gatekeeper*** or - // ***kyverno*** - Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesPolicyGatekeeper struct { - // This parameter adds namespaces to Gatekeeper's exemption list, so it will not - // enforce the constraints on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // The enforcement action to use for the gatekeeper module - EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - - // If true, the default policies will be installed - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesPolicyGatekeeperEnforcementAction string - -const ( - SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" - SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" - SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" -) - -type SpecDistributionModulesPolicyKyverno struct { - // This parameter adds namespaces to Kyverno's exemption list, so it will not - // enforce the constraints on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // If true, the default policies will be installed - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The validation failure action to use for the kyverno module - ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` -} - -type SpecDistributionModulesPolicyKyvernoValidationFailureAction string - -const ( - SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" - SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" -) - -type SpecDistributionModulesPolicyType string - -const ( - SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" - SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" - SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" -) - -type SpecDistributionModulesTracing struct { - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Tempo corresponds to the JSON schema field "tempo". - Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` - - // The type of tracing to use, either ***none*** or ***tempo*** - Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesTracingMinio struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The storage size for the minio pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -type SpecDistributionModulesTracingTempo struct { - // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". - ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The retention time for the tempo pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` -} - -type SpecDistributionModulesTracingTempoBackend string - -const ( - SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" - SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" -) - -type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key id of the external tempo backend - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the external tempo backend - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // The endpoint of the external tempo backend - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, the external tempo backend will not use tls - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key of the external tempo backend - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` -} - -type SpecDistributionModulesTracingType string - -const ( - SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" - SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" -) - -type SpecPlugins struct { - // Helm corresponds to the JSON schema field "helm". - Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` - - // Kustomize corresponds to the JSON schema field "kustomize". - Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` -} - -type SpecPluginsHelm struct { - // Releases corresponds to the JSON schema field "releases". - Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` - - // Repositories corresponds to the JSON schema field "repositories". - Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` -} - -type SpecPluginsHelmReleases []struct { - // The chart of the release - Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` - - // The name of the release - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The namespace of the release - Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` - - // Set corresponds to the JSON schema field "set". 
- Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` - - // The values of the release - Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` - - // The version of the release - Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` -} - -type SpecPluginsHelmReleasesElemSetElem struct { - // The name of the set - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The value of the set - Value string `json:"value" yaml:"value" mapstructure:"value"` -} - -type SpecPluginsHelmRepositories []struct { - // The name of the repository - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The url of the repository - Url string `json:"url" yaml:"url" mapstructure:"url"` -} - -type SpecPluginsKustomize []struct { - // The folder of the kustomize plugin - Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` - - // The name of the kustomize plugin - Name string `json:"name" yaml:"name" mapstructure:"name"` -} - -type TypesCidr string - -type TypesEnvRef string - -type TypesFileRef string - -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -type TypesFuryModuleComponentOverrides_1 struct { - // NodeSelector corresponds to the JSON schema field "nodeSelector". - NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // Tolerations corresponds to the JSON schema field "tolerations". - Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the security module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") - } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") - } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") - } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") } - type Plain SpecDistributionModules + type Plain SpecDistributionModulesIngressNginx var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModules(plain) + *j = SpecDistributionModulesIngressNginx(plain) return nil } -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) return nil } +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "http01", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + return fmt.Errorf("field type in SpecDistributionModulesDr: required") } - type Plain SpecDistributionModulesMonitoring + type Plain SpecDistributionModulesDr var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesMonitoring(plain) + *j = SpecDistributionModulesDr(plain) return nil } -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") } - type Plain SpecDistributionModulesLogging + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + } + type Plain SpecDistributionModulesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLogging(plain) + *j = SpecDistributionModulesIngress(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesNetworkingCilium) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) + } + *j = SpecDistributionModulesDrVeleroBackend(v) + return nil +} + +var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["maskSize"]; !ok || v == nil { - return fmt.Errorf("field maskSize in SpecDistributionModulesNetworkingCilium: required") + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") } - if v, ok := raw["podCidr"]; !ok || v == nil { - return fmt.Errorf("field podCidr in SpecDistributionModulesNetworkingCilium: required") + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") } - type Plain SpecDistributionModulesNetworkingCilium + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesNetworkingCilium(plain) + *j = SpecDistributionModulesLoggingCustomOutputs(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { + for _, expected := range enumValues_SpecDistributionModulesDrType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecDistributionModulesDrType(v) return nil } -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", } -var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + } + *j = SpecDistributionModulesLoggingLokiBackend(v) + return nil +} + +var enumValues_SpecDistributionModulesDrType = []interface{}{ "none", - "calico", - "cilium", + "on-premises", +} + +// Override the common configuration with a particular configuration for the +// module. +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // Set to override the node selector used to place the pods of the module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + +type TypesKubeResourcesLimits struct { + // The CPU limit for the Pod. Example: `1000m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the Pod. Example: `1G`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResourcesRequests struct { + // The CPU request for the Pod, in cores. Example: `500m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for the Pod. Example: `500M`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +} + +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication even if + // `.spec.modules.auth.provider.type` is SSO or Basic Auth. + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // Use this host for the ingress instead of the default one. + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // Use this ingress class for the ingress instead of the default one. + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + } + type Plain SpecDistributionModulesAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuth(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + } + type Plain SpecDistributionModulesAuthProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProvider(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesNetworkingType { + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = SpecDistributionModulesNetworkingType(v) + *j = SpecDistributionModulesAuthProviderType(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") - } - type Plain SpecDistributionModulesLoggingOpensearch - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLoggingOpensearch(plain) - return nil +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", } // UnmarshalJSON implements json.Unmarshaler. @@ -1551,827 +1404,1133 @@ func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) e return nil } -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` -} - -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesNetworking: required") + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") } - type Plain SpecDistributionModulesNetworking + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesNetworking(plain) + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) return nil } -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + } + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOverridesIngress(plain) + return nil } -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + } + type Plain SpecDistributionModulesLoggingOpensearch + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingOpensearch(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + type Plain SpecDistributionModulesAuthDex + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + *j = SpecDistributionModulesAuthDex(plain) return nil } -type TypesKubeResourcesLimits struct { - // The cpu limit for the loki pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` +type TypesFuryModuleComponentOverrides struct { + // Set to override the node selector used to place the pods of the package. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` + // Set to override the tolerations that will be added to the pods of the package. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + for _, expected := range enumValues_SpecDistributionModulesLoggingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = SpecDistributionModulesLoggingLokiBackend(v) + *j = SpecDistributionModulesLoggingType(v) return nil } -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", -} +const ( + SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" + SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" +) // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") } - type Plain SpecDistributionModulesLoggingCustomOutputs + type Plain SpecDistributionCustomPatchesSecretGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesPolicyGatekeeper - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) } - *j = SpecDistributionModulesPolicyGatekeeper(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) return nil } +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["baseDomain"]; !ok || v == nil {
-		return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required")
-	}
-	if v, ok := raw["nginx"]; !ok || v == nil {
-		return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required")
+	if v, ok := raw["type"]; !ok || v == nil {
+		return fmt.Errorf("field type in SpecDistributionModulesLogging: required")
 	}
-	type Plain SpecDistributionModulesIngress
+	type Plain SpecDistributionModulesLogging
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesIngress(plain)
+	*j = SpecDistributionModulesLogging(plain)
 	return nil
 }

-var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{
-	"Audit",
-	"Enforce",
+type SpecDistributionModulesMonitoringAlertManager struct {
+	// The webhook URL to send dead man's switch monitoring, for example to use with
+	// healthchecks.io.
+	DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"`
+
+	// Set to false to avoid installing the Prometheus rules (alerts) included with
+	// the distribution.
+	InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"`
+
+	// The Slack webhook URL where to send the infrastructural and workload alerts to.
+	SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringBlackboxExporter struct {
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringGrafana struct {
+	// Setting this to true will deploy an additional `grafana-basic-auth` ingress
+	// protected with Grafana's basic auth instead of SSO. Its intended use is as a
+	// temporary ingress for when there are problems with the SSO login flow.
+	//
+	// Notice that by default anonymous access is enabled.
+	BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's
+	// role. Example:
+	//
+	// ```yaml
+	// usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' ||
+	// contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') &&
+	// 'Viewer'"
+	// ```
+	//
+	// More details in [Grafana's
+	// documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping).
+ UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"` +} + +type SpecDistributionModulesMonitoringKubeStateMetrics struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesMonitoringMimirBackend string + +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") - } - type Plain SpecDistributionModulesIngressNginx - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressNginx(plain) - return nil +const ( + SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" + SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" +) + +// Configuration for Mimir's external storage backend. +type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // External S3-compatible endpoint for Mimir's storage. + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, will use HTTP as protocol instead of HTTPS. + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key (password) for the external S3-compatible bucket. + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +// Configuration for the Mimir package. 
+type SpecDistributionModulesMonitoringMimir struct {
+	// The storage backend type for Mimir. `minio` will use an in-cluster MinIO
+	// deployment for object storage, `externalEndpoint` can be used to point to an
+	// external S3-compatible object storage instead of deploying an in-cluster MinIO.
+	Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
+
+	// Configuration for Mimir's external storage backend.
+	ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// The retention time for the metrics stored in Mimir. Default is `30d`. Value must
+	// match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365
+	// days.
+	RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringMinioRootUser struct {
+	// The password for the default MinIO root user.
+	Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
+
+	// The username for the default MinIO root user.
+	Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
+}
+
+// Configuration for Monitoring's MinIO deployment.
+type SpecDistributionModulesMonitoringMinio struct {
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// RootUser corresponds to the JSON schema field "rootUser".
+	RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
+
+	// The PVC size for each MinIO disk, 6 disks total.
+	StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{}
+
+type SpecDistributionModulesMonitoringPrometheus struct {
+	// Set this option to ship the collected metrics to a remote Prometheus receiver.
+	//
+	// `remoteWrite` is an array of objects that allows configuring the
+	// [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for
+	// Prometheus. The objects in the array follow [the same schema as in the
+	// prometheus
+	// operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).
+	RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"`
+
+	// Resources corresponds to the JSON schema field "resources".
+	Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+
+	// The retention size for the `k8s` Prometheus instance.
+	RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"`
+
+	// The retention time for the `k8s` Prometheus instance.
+ RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` + + // The storage size for the `k8s` Prometheus instance. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringPrometheusAgent struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` +} + +type SpecDistributionModulesMonitoringType string + +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) } - *j = SpecDistributionModulesIngressNginxType(v) + *j = SpecDistributionModulesMonitoringType(v) return nil } -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", -} +const ( + SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" + SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" + SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" + SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" +) -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") - } - if v, ok := raw["validationFailureAction"]; !ok || v == nil { - return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") - } - type Plain SpecDistributionModulesPolicyKyverno - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesPolicyKyverno(plain) - return nil +type SpecDistributionModulesMonitoringX509Exporter struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") - } - type Plain SpecDistributionModulesIngressNginxTLS - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressNginxTLS(plain) - return nil -} +// Configuration for the Monitoring module. +type SpecDistributionModulesMonitoring struct { + // Alertmanager corresponds to the JSON schema field "alertmanager". + Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ - "none", - "gatekeeper", - "kyverno", + // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". + BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` + + // Grafana corresponds to the JSON schema field "grafana". + Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` + + // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". + KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` + + // Mimir corresponds to the JSON schema field "mimir". + Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` + + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Prometheus corresponds to the JSON schema field "prometheus". 
+	Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"`
+
+	// PrometheusAgent corresponds to the JSON schema field "prometheusAgent".
+	PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"`
+
+	// The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or
+	// `mimir`.
+	//
+	// - `none`: will disable the whole monitoring stack.
+	// - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus
+	// instance, Alertmanager, a set of alert rules, exporters needed to monitor all
+	// the components of the cluster, Grafana and a series of dashboards to view the
+	// collected metrics, and more.
+	// - `prometheusAgent`: will install Prometheus Operator, an instance of Prometheus
+	// in Agent mode (no alerting, no queries, no storage), and all the exporters
+	// needed to get metrics for the status of the cluster and the workloads. Useful
+	// when having a centralized (remote) Prometheus where to ship the metrics and not
+	// storing them locally in the cluster.
+	// - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir
+	// that allows for longer retention of metrics and the usage of Object Storage.
+	//
+	// Default is `prometheus`.
+	Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"`
+
+	// X509Exporter corresponds to the JSON schema field "x509Exporter".
+	X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"`
 }

 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error {
-	var v string
-	if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	var ok bool
-	for _, expected := range enumValues_SpecDistributionModulesPolicyType {
-		if reflect.DeepEqual(v, expected) {
-			ok = true
-			break
-		}
-	}
-	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v)
+	if v, ok := raw["type"]; !ok || v == nil {
+		return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required")
 	}
-	*j = SpecDistributionModulesPolicyType(v)
+	type Plain SpecDistributionModulesMonitoring
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = SpecDistributionModulesMonitoring(plain)
 	return nil
 }

+type TypesCidr string
+
+type SpecDistributionModulesNetworkingCilium struct {
+	// The mask size to use for the Pods network on each node.
+	MaskSize string `json:"maskSize" yaml:"maskSize" mapstructure:"maskSize"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// Allows specifying a CIDR for the Pods network different from
+	// `.spec.kubernetes.podCidr`. If not set, the default is to use
+	// `.spec.kubernetes.podCidr`.
+	PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesNetworkingCilium) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["maskSize"]; !ok || v == nil { + return fmt.Errorf("field maskSize in SpecDistributionModulesNetworkingCilium: required") } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["podCidr"]; !ok || v == nil { + return fmt.Errorf("field podCidr in SpecDistributionModulesNetworkingCilium: required") } - type Plain SpecDistributionModulesIngressNginxTLSSecret + type Plain SpecDistributionModulesNetworkingCilium var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + *j = SpecDistributionModulesNetworkingCilium(plain) return nil } +type SpecDistributionModulesNetworkingTigeraOperator struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesNetworkingType string + +var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ + "none", + "calico", + "cilium", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + for _, expected := range enumValues_SpecDistributionModulesNetworkingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) + *j = SpecDistributionModulesNetworkingType(v) return nil } -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", -} +const ( + SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" + SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico" + SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium" +) -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") - } - type Plain SpecDistributionModulesIngressCertManager - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressCertManager(plain) - return nil +// Configuration for the Networking module. +type SpecDistributionModulesNetworking struct { + // Cilium corresponds to the JSON schema field "cilium". + Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // TigeraOperator corresponds to the JSON schema field "tigeraOperator". + TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` + + // The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or + // `cilium`. + Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + return fmt.Errorf("field type in SpecDistributionModulesNetworking: required") } - type Plain SpecDistributionModulesPolicy + type Plain SpecDistributionModulesNetworking var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicy(plain) + *j = SpecDistributionModulesNetworking(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) - return nil +type SpecDistributionModulesPolicyGatekeeperEnforcementAction string + +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) return nil } -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "http01", -} +const ( + SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" + SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" + SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" +) -var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ - "minio", - "externalEndpoint", -} +// Configuration for the Gatekeeper package. +type SpecDistributionModulesPolicyGatekeeper struct { + // This parameter adds namespaces to Gatekeeper's exemption list, so it will not + // enforce the constraints on them. + AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) - } - *j = SpecDistributionModulesTracingTempoBackend(v) - return nil + // The default enforcement action to use for the included constraints. `deny` will + // block the admission when violations to the policies are found, `warn` will show + // a message to the user but will admit the violating requests and `dryrun` won't + // give any feedback to the user but it will log the violations. + EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` + + // Set to `false` to avoid installing the default Gatekeeper policies (constraints + // templates and constraints) included with the distribution. + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") } - type Plain SpecDistributionModulesDr + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + } + type Plain SpecDistributionModulesPolicyGatekeeper var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDr(plain) + *j = SpecDistributionModulesPolicyGatekeeper(plain) return nil } +type SpecDistributionModulesPolicyKyvernoValidationFailureAction string + +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) + type Plain SpecDistributionModulesIngressNginxTLS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesDrVeleroBackend(v) + *j = SpecDistributionModulesIngressNginxTLS(plain) return nil } -var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ - "minio", - "externalEndpoint", +const ( + SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" + SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" +) + +// Configuration for the Kyverno package. +type SpecDistributionModulesPolicyKyverno struct { + // This parameter adds namespaces to Kyverno's exemption list, so it will not + // enforce the policies on them. + AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` + + // Set to `false` to avoid installing the default Kyverno policies included with + // distribution. + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + + // Overrides corresponds to the JSON schema field "overrides". 
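A hedged example of the Gatekeeper configuration described above, as it would appear in `furyctl.yaml`; the namespace name is hypothetical and the other values only illustrate the allowed options:

```yaml
spec:
  distribution:
    modules:
      policy:
        type: gatekeeper
        gatekeeper:
          enforcementAction: deny            # one of: deny, dryrun, warn
          installDefaultPolicies: true       # set to false to skip the bundled constraints
          additionalExcludedNamespaces:
            - my-exempted-namespace          # hypothetical namespace added to Gatekeeper's exemption list
```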
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The validation failure action to use for the policies, `Enforce` will block + // when a request does not comply with the policies and `Audit` will not block but + // log when a request does not comply with the policies. + ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") } - *j = SpecDistributionModulesDrType(v) + type Plain SpecDistributionModulesPolicyKyverno + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyKyverno(plain) return nil } -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "on-premises", -} +type SpecDistributionModulesPolicyType string -var enumValues_SpecDistributionModulesTracingType = []interface{}{ +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ "none", - "tempo", + "gatekeeper", + "kyverno", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingType { + for _, expected := range enumValues_SpecDistributionModulesPolicyType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) } - *j = SpecDistributionModulesTracingType(v) + *j = SpecDistributionModulesPolicyType(v) return nil } -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` +const ( + SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" + SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" + SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" +) - // The host of the ingress - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` +// Configuration for the Policy module. +type SpecDistributionModulesPolicy struct { + // Gatekeeper corresponds to the JSON schema field "gatekeeper". + Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` -} + // Kyverno corresponds to the JSON schema field "kyverno". + Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") - } - type Plain SpecDistributionModulesAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuth(plain) - return nil + // The type of policy enforcement to use, either `none`, `gatekeeper` or + // `kyverno`. + // + // Default is `none`. + Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. 
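For the Kyverno alternative, a sketch of the Policy module section based on the types above; values are illustrative and the namespace is hypothetical:

```yaml
spec:
  distribution:
    modules:
      policy:
        type: kyverno                        # one of: none, gatekeeper, kyverno (default: none)
        kyverno:
          validationFailureAction: Audit     # Audit logs violations, Enforce blocks them
          installDefaultPolicies: true
          additionalExcludedNamespaces:
            - my-legacy-namespace            # hypothetical namespace exempted from the policies
```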
-func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") } - type Plain SpecDistributionModulesTracing + type Plain SpecDistributionModulesPolicy var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesTracing(plain) + *j = SpecDistributionModulesPolicy(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") - } - type Plain SpecDistributionModulesAuthProvider - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthProvider(plain) - return nil +type SpecDistributionModulesTracingMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +// Configuration for Tracing's MinIO deployment. +type SpecDistributionModulesTracingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesTracingTempoBackend string + +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) } - *j = SpecDistributionModulesMonitoringType(v) + *j = SpecDistributionModulesTracingTempoBackend(v) return nil } +const ( + SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" + SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" +) + +// Configuration for Tempo's external storage backend. +type SpecDistributionModulesTracingTempoExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // External S3-compatible endpoint for Tempo's storage. + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, will use HTTP as protocol instead of HTTPS. + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key (password) for the external S3-compatible bucket. + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +// Configuration for the Tempo package. +type SpecDistributionModulesTracingTempo struct { + // The storage backend type for Tempo. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Tempo's external storage backend. + ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the traces stored in Tempo. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +} + +type SpecDistributionModulesTracingType string + +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", +} + // UnmarshalJSON implements json.Unmarshaler. 
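A sketch of Tempo pointed at an external S3-compatible backend, using the fields defined above; endpoint, bucket and credentials are placeholders and the retention value is only illustrative:

```yaml
spec:
  distribution:
    modules:
      tracing:
        type: tempo
        tempo:
          backend: externalEndpoint                 # instead of the in-cluster MinIO deployment
          retentionTime: 720h                       # illustrative retention for the stored traces
          externalEndpoint:
            endpoint: s3.storage.example.internal   # placeholder S3-compatible endpoint
            insecure: false                         # true would use HTTP instead of HTTPS
            bucketName: tempo-traces
            accessKeyId: PLACEHOLDER_ACCESS_KEY
            secretAccessKey: PLACEHOLDER_SECRET_KEY
```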
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + for _, expected := range enumValues_SpecDistributionModulesTracingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) } - *j = SpecDistributionModulesAuthProviderType(v) + *j = SpecDistributionModulesTracingType(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistribution) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["kubeconfig"]; !ok || v == nil { - return fmt.Errorf("field kubeconfig in SpecDistribution: required") - } - if v, ok := raw["modules"]; !ok || v == nil { - return fmt.Errorf("field modules in SpecDistribution: required") - } - type Plain SpecDistribution - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistribution(plain) - return nil -} +const ( + SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" + SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" +) -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", +// Configuration for the Tracing module. +type SpecDistributionModulesTracing struct { + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tempo corresponds to the JSON schema field "tempo". + Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` + + // The type of tracing to use, either `none` or `tempo`. `none` will disable the + // Tracing module and `tempo` will install a Grafana Tempo deployment. + // + // Default is `tempo`. + Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. 
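And the in-cluster variant, keeping the MinIO backend; storage size and credentials are illustrative placeholders:

```yaml
spec:
  distribution:
    modules:
      tracing:
        type: tempo              # `none` disables the Tracing module; default is `tempo`
        tempo:
          backend: minio         # store traces in the in-cluster MinIO deployment
        minio:
          storageSize: 20Gi      # illustrative PVC size for each of the 6 MinIO disks
          rootUser:
            username: minio-traces-admin     # placeholder credentials
            password: PLACEHOLDER_PASSWORD
```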
-func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") - } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") } - type Plain SpecPluginsHelmReleasesElemSetElem + type Plain SpecDistributionModulesTracing var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecPluginsHelmReleasesElemSetElem(plain) + *j = SpecDistributionModulesTracing(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") } - type Plain SpecDistributionModulesAuthProviderBasicAuth + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") } - type Plain SpecDistributionModulesAuthOverridesIngress + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") + } + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") + } + type Plain SpecDistributionModules var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthOverridesIngress(plain) + *j = SpecDistributionModules(plain) return nil } +type TypesKubeLabels map[string]string + // UnmarshalJSON implements json.Unmarshaler. 
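The validation above marks `dr`, `ingress`, `logging` and `policy` as required keys of `spec.distribution.modules`. A bare-bones sketch; note that the `ingress` value shown is an assumption, since that module's own required fields are defined elsewhere in the schema:

```yaml
spec:
  distribution:
    modules:
      dr:
        type: none
      logging:
        type: none
      policy:
        type: none
      ingress:
        nginx:           # assumed minimal Ingress configuration; check the Ingress module schema for its required fields
          type: single
```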
-func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { +func (j *SpecDistribution) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + if v, ok := raw["kubeconfig"]; !ok || v == nil { + return fmt.Errorf("field kubeconfig in SpecDistribution: required") + } + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") } - type Plain SpecDistributionModulesAuthDex + type Plain SpecDistribution var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthDex(plain) + *j = SpecDistribution(plain) return nil } +type SpecPluginsHelmReleasesElemSetElem struct { + // The name of the set + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The value of the set + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") + } + type Plain SpecPluginsHelmReleasesElemSetElem var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + *j = SpecPluginsHelmReleasesElemSetElem(plain) return nil } +type SpecPluginsHelmReleases []struct { + // The chart of the release + Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. + DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + + // The name of the release + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the release + Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` + + // Set corresponds to the JSON schema field "set". + Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` + + // The values of the release + Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` + + // The version of the release + Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` +} + +type SpecPluginsHelmRepositories []struct { + // The name of the repository + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The url of the repository + Url string `json:"url" yaml:"url" mapstructure:"url"` +} + +type SpecPluginsHelm struct { + // Releases corresponds to the JSON schema field "releases". 
+ Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` + + // Repositories corresponds to the JSON schema field "repositories". + Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` +} + +type SpecPluginsKustomize []struct { + // The folder of the kustomize plugin + Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` + + // The name of the kustomize plugin + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +type SpecPlugins struct { + // Helm corresponds to the JSON schema field "helm". + Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` + + // Kustomize corresponds to the JSON schema field "kustomize". + Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) return nil } -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - // UnmarshalJSON implements json.Unmarshaler. func (j *Spec) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2396,46 +2555,6 @@ func (j *Spec) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") - } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) - return nil -} - -type TypesKubeLabels map[string]string - -// UnmarshalJSON implements json.Unmarshaler. 
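A hedged sketch of the `spec.plugins` section built from the Helm and Kustomize types above; repository, chart, namespace and file paths are all hypothetical:

```yaml
spec:
  plugins:
    helm:
      repositories:
        - name: example-charts                 # hypothetical Helm repository
          url: https://charts.example.org
      releases:
        - name: my-plugin                      # hypothetical release
          namespace: my-namespace
          chart: example-charts/my-chart
          version: "1.2.3"
          disableValidationOnInstall: true     # skip the `helm diff` validation on install, keep it on upgrades
          set:
            - name: replicaCount
              value: "2"
          values:
            - ./plugins/my-plugin-values.yaml
    kustomize:
      - name: my-kustomize-plugin
        folder: ./plugins/my-kustomize-plugin
```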
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) - return nil -} - var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ "create", "replace", @@ -2502,15 +2621,39 @@ func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { return nil } -const TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - -type TypesKubeTolerationOperator string - var enumValues_TypesKubeTolerationOperator = []interface{}{ "Exists", "Equal", } +type TypesKubeTolerationOperator string + +const ( + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" +) + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + } + *j = TypesKubeTolerationEffect(v) + return nil +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2569,9 +2712,9 @@ func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { } const ( - TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" + TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" ) type TypesKubeTolerationOperator_1 string @@ -2644,31 +2787,22 @@ func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { return nil } -const ( - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" -) +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". + NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) - } - *j = TypesKubeTolerationEffect(v) - return nil + // Tolerations corresponds to the JSON schema field "tolerations". + Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", } +type TypesKubeTolerationEffect string + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2687,19 +2821,82 @@ func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { return nil } -var enumValues_TypesKubeTolerationEffect = []interface{}{ +type TypesEnvRef string + +type TypesFileRef string + +type TypesIpAddress string + +type TypesKubeLabels_1 map[string]string + +type TypesKubeTaintsEffect string + +var enumValues_TypesKubeTaintsEffect = []interface{}{ "NoSchedule", "PreferNoSchedule", "NoExecute", } -type TypesKubeTolerationEffect string +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTaintsEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTaintsEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTaintsEffect, v) + } + *j = TypesKubeTaintsEffect(v) + return nil +} -type TypesIpAddress string +const ( + TypesKubeTaintsEffectNoSchedule TypesKubeTaintsEffect = "NoSchedule" + TypesKubeTaintsEffectPreferNoSchedule TypesKubeTaintsEffect = "PreferNoSchedule" + TypesKubeTaintsEffectNoExecute TypesKubeTaintsEffect = "NoExecute" +) -type TypesKubeLabels_1 map[string]string +type TypesKubeTaints struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTaintsEffect `json:"effect" yaml:"effect" mapstructure:"effect"` -type TypesKubeTaints []string + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Value corresponds to the JSON schema field "value". + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
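`TypesKubeTaints` is an object with `effect`, `key` and `value`, all marked as required. Wherever a configuration accepts a list of taints (for example, node pool definitions in the kinds that have them), an entry would look like the following; key and value are illustrative:

```yaml
- key: node.kubernetes.io/role      # illustrative taint key
  value: infra
  effect: NoSchedule                # one of: NoSchedule, PreferNoSchedule, NoExecute
```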
+func (j *TypesKubeTaints) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeTaints: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeTaints: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeTaints: required") + } + type Plain TypesKubeTaints + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeTaints(plain) + return nil +} type TypesSemVer string diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go index 854a24a16..93972fde1 100644 --- a/pkg/apis/onpremises/v1alpha2/public/schema.go +++ b/pkg/apis/onpremises/v1alpha2/public/schema.go @@ -14,6 +14,7 @@ type Metadata struct { Name string `json:"name" yaml:"name" mapstructure:"name"` } +// A KFD Cluster deployed on top of a set of existing VMs. type OnpremisesKfdV1Alpha2 struct { // ApiVersion corresponds to the JSON schema field "apiVersion". ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` @@ -38,7 +39,7 @@ type Spec struct { // Defines which KFD version will be installed and, in consequence, the Kubernetes // version used to create the cluster. It supports git tags and branches. Example: - // v1.30.1. + // `v1.30.1`. DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` // Kubernetes corresponds to the JSON schema field "kubernetes". @@ -62,7 +63,7 @@ type SpecDistribution struct { // Common configuration for all the distribution modules. type SpecDistributionCommon struct { // The node selector to use to place the pods for all the KFD modules. Follows - // Kubernetes selector format. Example: `node.kubernetes.io/role: infra` + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". @@ -70,6 +71,9 @@ type SpecDistributionCommon struct { // URL of the registry where to pull images from for the Distribution phase. // (Default is `registry.sighup.io/fury`). + // + // NOTE: If plugins are pulling from the default registry, the registry will be + // replaced for the plugin too. Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` // The relative path to the vendor directory, does not need to be changed. @@ -541,6 +545,8 @@ type SpecDistributionModulesAuthProvider struct { // and require authentication before accessing them. // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth // (username and password) authentication. + // + // Default is `none`. Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } @@ -569,6 +575,8 @@ type SpecDistributionModulesDr struct { // The type of the Disaster Recovery, must be `none` or `on-premises`. `none` // disables the module and `on-premises` will install Velero and an optional MinIO // deployment. + // + // Default is `none`. Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` // Velero corresponds to the JSON schema field "velero". 
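For the `OnPremises` kind, a sketch showing the documented defaults set explicitly, plus a registry override; the mirror hostname is hypothetical:

```yaml
spec:
  distribution:
    common:
      registry: registry.mirror.example.internal/fury   # hypothetical mirror; default is registry.sighup.io/fury
    modules:
      auth:
        provider:
          type: none     # default; `basicAuth` or `sso` protect the infrastructural ingresses
      dr:
        type: none       # default; `on-premises` installs Velero and an optional MinIO deployment
```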
@@ -667,7 +675,7 @@ type SpecDistributionModulesIngress struct { // If corresponds to the JSON schema field "if". If interface{} `json:"if,omitempty" yaml:"if,omitempty" mapstructure:"if,omitempty"` - // Configurations for the nginx ingress controller package. + // Configurations for the Ingress nginx controller package. Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". @@ -695,7 +703,7 @@ type SpecDistributionModulesIngressCertManagerClusterIssuer struct { // The email address to use during the certificate issuing process. Email string `json:"email" yaml:"email" mapstructure:"email"` - // Name of the clusterIssuer + // Name of the clusterIssuer. Name string `json:"name" yaml:"name" mapstructure:"name"` // List of challenge solvers to use instead of the default one for the `http01` @@ -723,7 +731,7 @@ type SpecDistributionModulesIngressNginx struct { // Tls corresponds to the JSON schema field "tls". Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` - // The type of the nginx ingress controller, options are: + // The type of the Ingress nginx controller, options are: // - `none`: no ingress controller will be installed and no infrastructural // ingresses will be created. // - `single`: a single ingress controller with ingress class `nginx` will be @@ -733,6 +741,8 @@ type SpecDistributionModulesIngressNginx struct { // `internal` ingress class intended for private ingresses and one for the // `external` ingress class intended for public ingresses. KFD infrastructural // ingresses wil use the `internal` ingress class when using the dual type. + // + // Default is `single`. Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` } @@ -782,11 +792,11 @@ type SpecDistributionModulesIngressOverrides struct { // Ingresses corresponds to the JSON schema field "ingresses". Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // Set to override the node selector used to place the pods of the Ingress module + // Set to override the node selector used to place the pods of the Ingress module. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Set to override the tolerations that will be added to the pods of the Ingress - // module + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } @@ -827,10 +837,12 @@ type SpecDistributionModulesLogging struct { // - `customOuputs`: the Logging Operator will be deployed and installed but with // no local storage, you will have to create the needed Outputs and ClusterOutputs // to ship the logs to your desired storage. + // + // Default is `opensearch`. Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` } -// DEPRECATED in latest versions of KFD. +// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. type SpecDistributionModulesLoggingCerebro struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1029,6 +1041,8 @@ type SpecDistributionModulesMonitoring struct { // storing them locally in the cluster. 
// - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir // that allows for longer retention of metrics and the usage of Object Storage. + // + // Default is `prometheus`. Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` // X509Exporter corresponds to the JSON schema field "x509Exporter". @@ -1037,7 +1051,7 @@ type SpecDistributionModulesMonitoring struct { type SpecDistributionModulesMonitoringAlertManager struct { // The webhook URL to send dead man's switch monitoring, for example to use with - // healthchecks.io + // healthchecks.io. DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` // Set to false to avoid installing the Prometheus rules (alerts) included with @@ -1256,6 +1270,8 @@ type SpecDistributionModulesPolicy struct { // The type of policy enforcement to use, either `none`, `gatekeeper` or // `kyverno`. + // + // Default is `none`. Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1334,6 +1350,8 @@ type SpecDistributionModulesTracing struct { // The type of tracing to use, either `none` or `tempo`. `none` will disable the // Tracing module and `tempo` will install a Grafana Tempo deployment. + // + // Default is `tempo`. Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } @@ -2128,10 +2146,10 @@ type TypesKubeResources struct { } type TypesKubeResourcesRequests struct { - // The cpu request for the loki pods + // The CPU request for the Pod, in cores. Example: `500m`. Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - // The memory request for the prometheus pods + // The memory request for the Pod. Example: `500M`. Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } @@ -2154,10 +2172,10 @@ func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { } type TypesKubeResourcesLimits struct { - // The cpu limit for the loki pods + // The CPU limit for the Pod. Example: `1000m`. Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - // The memory limit for the prometheus pods + // The memory limit for the Pod. Example: `1G`. Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } @@ -3200,6 +3218,10 @@ type SpecPluginsHelmReleases []struct { // The chart of the release Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. + DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + // The name of the release Name string `json:"name" yaml:"name" mapstructure:"name"` diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index 2aa905308..72c620698 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -6,6 +6,7 @@ "properties": { "name": { "type": "string", + "description": "The name of the cluster. 
It will also be used as a prefix for all the other resources created.", "minLength": 1, "maxLength": 56 } @@ -20,17 +21,20 @@ "properties": { "distributionVersion": { "type": "string", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "region": { - "$ref": "#/$defs/Types.AwsRegion" + "$ref": "#/$defs/Types.AwsRegion", + "description": "Defines in which AWS region the cluster and all the related resources will be created." }, "tags": { "$ref": "#/$defs/Types.AwsTags", "description": "This map defines which will be the common tags that will be added to all the resources created on AWS." }, "toolsConfiguration": { - "$ref": "#/$defs/Spec.ToolsConfiguration" + "$ref": "#/$defs/Spec.ToolsConfiguration", + "description": "Configuration for tools used by furyctl, like Terraform." }, "infrastructure": { "$ref": "#/$defs/Spec.Infrastructure" @@ -187,28 +191,29 @@ "Spec.Distribution.Common": { "type": "object", "additionalProperties": false, + "description": "Common configuration for all the distribution modules.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for all the KFD modules" + "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```" }, "provider": { "$ref": "#/$defs/Spec.Distribution.Common.Provider" }, "relativeVendorPath": { "type": "string", - "description": "The relative path to the vendor directory, does not need to be changed" + "description": "The relative path to the vendor directory, does not need to be changed." }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`)." } } }, @@ -218,7 +223,7 @@ "properties": { "type": { "type": "string", - "description": "The type of the provider, must be EKS if specified" + "description": "The provider type. Don't set. FOR INTERNAL USE ONLY." } }, "required": [ @@ -267,6 +272,7 @@ "Spec.Distribution.Modules.Auth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Auth module.", "properties": { "overrides": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" @@ -276,7 +282,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain for the auth module" + "description": "Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class." 
}, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" @@ -355,14 +361,15 @@ "Spec.Distribution.Modules.Auth.Dex": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Dex package.", "properties": { "connectors": { "type": "array", - "description": "The connectors for dex" + "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/" }, "additionalStaticClients": { "type": "array", - "description": "The additional static clients for dex" + "description": "Additional static clients defitions that will be added to the default clients included with the distribution in Dex's configuration. Example:\n\n```yaml\nadditionalStaticClients:\n - id: my-custom-client\n name: \"A custom additional static client\"\n redirectURIs:\n - \"https://myapp.tld/redirect\"\n - \"https://alias.tld/oidc-callback\"\n secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/" }, "expiry": { "type": "object", @@ -389,10 +396,11 @@ "Spec.Distribution.Modules.Auth.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Auth module.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the auth module" + "description": "Set to override the node selector used to place the pods of the Auth module." }, "tolerations": { "type": [ @@ -402,10 +410,11 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the auth module" + "description": "Set to override the tolerations that will be added to the pods of the Auth module." }, "ingresses": { "type": "object", + "description": "Override the definition of the Auth module ingresses.", "additionalProperties": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" } @@ -418,11 +427,11 @@ "properties": { "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } }, "required": [ @@ -444,7 +453,7 @@ "basicAuth", "sso" ], - "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." }, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -457,14 +466,15 @@ "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the HTTP Basic Auth provider.", "properties": { "username": { "type": "string", - "description": "The username for the basic auth" + "description": "The username for logging in with the HTTP basic authentication." 
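Based on the Auth definitions above for the `EKSCluster` kind, a sketch of per-ingress overrides; the ingress key name, hosts and ingress class are hypothetical:

```yaml
spec:
  distribution:
    modules:
      auth:
        baseDomain: auth.example.internal      # hypothetical base domain for the Auth module ingresses
        overrides:
          nodeSelector:
            node.kubernetes.io/role: infra
          ingresses:
            gangplank:                         # hypothetical ingress name; host and ingressClass are required for each entry
              host: gangplank.auth.example.internal
              ingressClass: internal
```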
}, "password": { "type": "string", - "description": "The password for the basic auth" + "description": "The password for logging in with the HTTP basic authentication." } }, "required": [ @@ -547,6 +557,7 @@ "Spec.Distribution.Modules.Dr": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Disaster Recovery module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -557,7 +568,7 @@ "none", "eks" ], - "description": "The type of the DR, must be ***none*** or ***eks***" + "description": "The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups.\n\nDefault is `none`." }, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -631,14 +642,14 @@ "bucketName": { "$ref": "#/$defs/Types.AwsS3BucketName", "maxLength": 49, - "description": "The name of the velero bucket" + "description": "The name of the bucket for Velero." }, "iamRoleArn": { "$ref": "#/$defs/Types.AwsArn" }, "region": { "$ref": "#/$defs/Types.AwsRegion", - "description": "The region where the velero bucket is located" + "description": "The region where the bucket for Velero will be located." } }, "required": [ @@ -668,10 +679,11 @@ "properties": { "baseDomain": { "type": "string", - "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" + "description": "The base domain used for all the KFD ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone." }, "certManager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses." }, "dns": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS" @@ -684,7 +696,7 @@ }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller module" + "description": "Configurations for the Ingress nginx controller package." }, "overrides": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides" @@ -707,6 +719,7 @@ "Spec.Distribution.Modules.Ingress.CertManager": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.", "properties": { "clusterIssuer": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" @@ -721,6 +734,7 @@ }, "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { "additionalProperties": false, + "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.", "oneOf": [ { "required": [ @@ -737,18 +751,18 @@ "email": { "type": "string", "format": "email", - "description": "The email of the cluster issuer" + "description": "The email address to use during the certificate issuing process." 
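A sketch of the `basicAuth` provider described above; the credentials are placeholders only:

```yaml
spec:
  distribution:
    modules:
      auth:
        provider:
          type: basicAuth             # protects the infrastructural ingresses with HTTP basic auth
          basicAuth:
            username: admin           # placeholder credentials
            password: PLACEHOLDER_PASSWORD
```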
}, "name": { "type": "string", - "description": "The name of the cluster issuer" + "description": "Name of the clusterIssuer." }, "route53": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53" }, "solvers": { "type": "array", - "description": "The custom solvers configurations" + "description": "List of challenge solvers to use instead of the default one for the `http01` challenge." }, "type": { "type": "string", @@ -756,7 +770,7 @@ "dns01", "http01" ], - "description": "The type of the cluster issuer, must be ***dns01*** or ***http01***" + "description": "The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge." } }, "required": [ @@ -788,6 +802,7 @@ }, "Spec.Distribution.Modules.Ingress.DNS": { "type": "object", + "description": "DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission.", "additionalProperties": false, "properties": { "public": { @@ -807,14 +822,15 @@ }, "Spec.Distribution.Modules.Ingress.DNS.Private": { "additionalProperties": false, + "description": "The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone.", "properties": { "create": { "type": "boolean", - "description": "If true, the private hosted zone will be created" + "description": "By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead." }, "name": { "type": "string", - "description": "The name of the private hosted zone" + "description": "The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`." }, "vpcId": { "type": "string" @@ -833,11 +849,11 @@ "properties": { "name": { "type": "string", - "description": "The name of the public hosted zone" + "description": "The name of the public hosted zone." }, "create": { "type": "boolean", - "description": "If true, the public hosted zone will be created" + "description": "By default, a Terraform data source will be used to get the public DNS zone. Set to `true` to create the public zone instead." } }, "required": [ @@ -881,7 +897,7 @@ "single", "dual" ], - "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" + "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type.\n\nDefault is `single`." }, "tls": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" @@ -905,7 +921,7 @@ "secret", "none" ], - "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" + "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`." 
},
"secret": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret"
@@ -930,16 +946,19 @@
"Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": {
"type": "object",
"additionalProperties": false,
+ "description": "Kubernetes TLS secret for the ingresses TLS certificate.",
"properties": {
"cert": {
"type": "string",
- "description": "The certificate file content or you can use the file notation to get the content from a file"
+ "description": "The certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file."
},
"key": {
- "type": "string"
+ "type": "string",
+ "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file."
},
"ca": {
- "type": "string"
+ "type": "string",
+ "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file."
}
},
"required": [
@@ -951,20 +970,21 @@
"Spec.Distribution.Modules.Ingress.Overrides": {
"type": "object",
"additionalProperties": false,
+ "description": "Override the common configuration with a particular configuration for the Ingress module.",
"properties": {
"ingresses": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses"
},
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for the ingress module"
+ "description": "Set to override the node selector used to place the pods of the Ingress module."
},
"tolerations": {
"type": "array",
"items": {
"$ref": "#/$defs/Types.KubeToleration"
},
- "description": "The tolerations that will be added to the pods for the ingress module"
+ "description": "Set to override the tolerations that will be added to the pods of the Ingress module."
}
}
},
@@ -980,6 +1000,7 @@
"Spec.Distribution.Modules.Logging": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Logging module.",
"properties": {
"overrides": {
"$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -992,7 +1013,7 @@
"loki",
"customOutputs"
],
- "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage."
+ "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but with no local storage; you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`."
}, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -1049,6 +1070,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", + "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.", "additionalProperties": false, "properties": { "overrides": { @@ -1057,41 +1079,41 @@ } }, "Spec.Distribution.Modules.Logging.CustomOutputs": { - "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.", "type": "object", "additionalProperties": false, "properties": { "audit": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "events": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "infra": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "ingressNginx": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "kubernetes": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." 
+ "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdCommon": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdEtcd": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "errors": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" } }, "required": [ @@ -1107,10 +1129,12 @@ }, "Spec.Distribution.Modules.Logging.Loki": { "type": "object", + "description": "Configuration for the Loki package.", "additionalProperties": false, "properties": { "backend": { "type": "string", + "description": "The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.", "enum": [ "minio", "externalEndpoint" @@ -1119,26 +1143,27 @@ "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Loki's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the loki external endpoint" + "description": "External S3-compatible endpoint for Loki's storage." }, "insecure": { "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the loki external endpoint" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the loki external endpoint" + "description": "The access key ID (username) for the external S3-compatible bucket." 
}, "bucketName": { "type": "string", - "description": "The bucket name of the loki external endpoint" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1150,10 +1175,11 @@ "Spec.Distribution.Modules.Logging.Minio": { "type": "object", "additionalProperties": false, + "description": "Configuration for Logging's MinIO deployment.", "properties": { "storageSize": { "type": "string", - "description": "The PVC size for each minio disk, 6 disks total" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1161,11 +1187,11 @@ "properties": { "username": { "type": "string", - "description": "The username of the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password of the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1184,14 +1210,14 @@ "single", "triple" ], - "description": "The type of the opensearch, must be ***single*** or ***triple***" + "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment." }, "resources": { "$ref": "#/$defs/Types.KubeResources" }, "storageSize": { "type": "string", - "description": "The storage size for the opensearch pods" + "description": "The storage size for the OpenSearch volumes." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1204,6 +1230,7 @@ "Spec.Distribution.Modules.Logging.Operator": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging Operator.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1213,7 +1240,7 @@ "Spec.Distribution.Modules.Monitoring": { "type": "object", "additionalProperties": false, - "description": "configuration for the Monitoring module components", + "description": "Configuration for the Monitoring module.", "properties": { "type": { "type": "string", @@ -1223,7 +1250,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." 
+ "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1266,15 +1293,15 @@ "properties": { "deadManSwitchWebhookUrl": { "type": "string", - "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io" + "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io." }, "installDefaultRules": { "type": "boolean", - "description": "If true, the default rules will be installed" + "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution." }, "slackWebhookUrl": { "type": "string", - "description": "The slack webhook url to send alerts" + "description": "The Slack webhook URL where to send the infrastructural and workload alerts to." } } }, @@ -1316,10 +1343,11 @@ "Spec.Distribution.Modules.Monitoring.Mimir": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Mimir package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the mimir pods" + "description": "The retention time for the logs stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days." }, "backend": { "type": "string", @@ -1327,31 +1355,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Mimir's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external mimir backend" + "description": "External S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external mimir backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external mimir backend" + "description": "The secret access key (password) for the external S3-compatible bucket." 
}, "accessKeyId": { "type": "string", - "description": "The access key id of the external mimir backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external mimir backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1362,11 +1391,12 @@ }, "Spec.Distribution.Modules.Monitoring.Minio": { "type": "object", + "description": "Configuration for Monitoring's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1374,11 +1404,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1396,15 +1426,15 @@ }, "retentionTime": { "type": "string", - "description": "The retention time for the k8s Prometheus instance." + "description": "The retention time for the `k8s` Prometheus instance." }, "retentionSize": { "type": "string", - "description": "The retention size for the k8s Prometheus instance." + "description": "The retention size for the `k8s` Prometheus instance." }, "storageSize": { "type": "string", - "description": "The storage size for the k8s Prometheus instance." + "description": "The storage size for the `k8s` Prometheus instance." }, "remoteWrite": { "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", @@ -1442,9 +1472,10 @@ }, "Spec.Distribution.Modules.Networking": { "additionalProperties": false, + "description": "Configuration for the Networking module.", "properties": { "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "$ref": "#/$defs/Types.FuryModuleOverrides" }, "tigeraOperator": { "$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator" @@ -1470,6 +1501,7 @@ "Spec.Distribution.Modules.Policy": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Policy module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1481,7 +1513,7 @@ "gatekeeper", "kyverno" ], - "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***" + "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." 
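For reference, the `remoteWrite` option described above could be used as in the following sketch, where the receiver URL is a placeholder and each array item follows the prometheus-operator `RemoteWriteSpec`:

```yaml
# Hypothetical fragment: ship the collected metrics to a remote receiver.
spec:
  distribution:
    modules:
      monitoring:
        type: prometheus
        prometheus:
          remoteWrite:
            - url: https://metrics.example.tld/api/v1/write   # placeholder receiver endpoint
```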
}, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -1527,6 +1559,7 @@ "Spec.Distribution.Modules.Policy.Gatekeeper": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Gatekeeper package.", "properties": { "additionalExcludedNamespaces": { "type": "array", @@ -1542,11 +1575,11 @@ "dryrun", "warn" ], - "description": "The enforcement action to use for the gatekeeper module" + "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1560,13 +1593,14 @@ "Spec.Distribution.Modules.Policy.Kyverno": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Kyverno package.", "properties": { "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" }, - "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." + "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them." }, "validationFailureAction": { "type": "string", @@ -1574,11 +1608,11 @@ "Audit", "Enforce" ], - "description": "The validation failure action to use for the kyverno module" + "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Kyverno policies included with distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1592,6 +1626,7 @@ "Spec.Distribution.Modules.Tracing": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tracing module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1602,7 +1637,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either ***none*** or ***tempo***" + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -1617,11 +1652,12 @@ }, "Spec.Distribution.Modules.Tracing.Minio": { "type": "object", + "description": "Configuration for Tracing's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." 
}, "rootUser": { "type": "object", @@ -1629,11 +1665,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1645,10 +1681,11 @@ "Spec.Distribution.Modules.Tracing.Tempo": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tempo package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the tempo pods" + "description": "The retention time for the traces stored in Tempo." }, "backend": { "type": "string", @@ -1656,31 +1693,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { + "description": "Configuration for Tempo's external storage backend.", "type": "object", "additionalProperties": false, "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external tempo backend" + "description": "External S3-compatible endpoint for Tempo's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external tempo backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external tempo backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external tempo backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external tempo backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1694,12 +1732,10 @@ "additionalProperties": false, "properties": { "vpc": { - "$ref": "#/$defs/Spec.Infrastructure.Vpc", - "description": "This key defines the VPC that will be created in AWS" + "$ref": "#/$defs/Spec.Infrastructure.Vpc" }, "vpn": { - "$ref": "#/$defs/Spec.Infrastructure.Vpn", - "description": "This section defines the creation of VPN bastions" + "$ref": "#/$defs/Spec.Infrastructure.Vpn" } }, "allOf": [ @@ -1777,6 +1813,7 @@ }, "Spec.Infrastructure.Vpc": { "type": "object", + "description": "Configuration for the VPC that will be created to host the EKS cluster and its related resources. 
If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead.",
"additionalProperties": false,
"properties": {
"network": {
@@ -1793,7 +1830,7 @@
"properties": {
"cidr": {
"$ref": "#/$defs/Types.Cidr",
- "description": "This is the CIDR of the VPC that will be created"
+ "description": "The network CIDR for the VPC that will be created."
},
"subnetsCidrs": {
"$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs"
@@ -1806,6 +1843,7 @@
},
"Spec.Infrastructure.Vpc.Network.SubnetsCidrs": {
"type": "object",
+ "description": "Network CIDRs configuration for private and public subnets.",
"additionalProperties": false,
"properties": {
"private": {
@@ -1813,14 +1851,14 @@
"items": {
"$ref": "#/$defs/Types.Cidr"
},
- "description": "These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created"
+ "description": "Network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created."
},
"public": {
"type": "array",
"items": {
"$ref": "#/$defs/Types.Cidr"
},
- "description": "These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created"
+ "description": "Network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created."
}
},
"required": [
@@ -1830,50 +1868,51 @@
},
"Spec.Infrastructure.Vpn": {
"type": "object",
+ "description": "Configuration for the VPN server instances.",
"additionalProperties": false,
"properties": {
"instances": {
"type": "integer",
- "description": "The number of instances to create, 0 to skip the creation"
+ "description": "The number of VPN server instances to create, `0` to skip the creation."
},
"port": {
"$ref": "#/$defs/Types.TcpPort",
- "description": "The port used by the OpenVPN server"
+ "description": "The port where each OpenVPN server will listen for connections."
},
"instanceType": {
"type": "string",
- "description": "The size of the AWS EC2 instance"
+ "description": "The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3.micro`."
},
"diskSize": {
"type": "integer",
- "description": "The size of the disk in GB"
+ "description": "The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB."
},
"operatorName": {
"type": "string",
- "description": "The username of the account to create in the bastion's operating system"
+ "description": "The username of the account to create in the bastion's operating system."
},
"dhParamsBits": {
"type": "integer",
- "description": "The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file"
+ "description": "The `dhParamsBits` size used for the creation of the .pem file that will be used in the dh openvpn server.conf file."
},
"vpnClientsSubnetCidr": {
"$ref": "#/$defs/Types.Cidr",
- "description": "The CIDR that will be used to assign IP addresses to the VPN clients when connected"
+ "description": "The network CIDR that will be used to assign IP addresses to the VPN clients when connected."
}, "ssh": { "$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh" }, "vpcId": { "$ref": "#/$defs/Types.AwsVpcId", - "description": "The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted." }, "bucketNamePrefix": { "$ref": "#/$defs/Types.AwsS3BucketNamePrefix", - "description": "This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states" + "description": "This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users)." }, "iamUserNameOverride": { "$ref": "#/$defs/Types.AwsIamRoleName", - "description": "Overrides the default IAM user name for the VPN" + "description": "Overrides IAM user name for the VPN. Default is to use the cluster name." } }, "required": [ @@ -1897,7 +1936,7 @@ } ] }, - "description": "This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented" + "description": "**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system." }, "githubUsersName": { "type": "array", @@ -1905,14 +1944,14 @@ "type": "string" }, "minItems": 1, - "description": "The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user" + "description": "List of GitHub usernames from whom get their SSH public key and add as authorized keys of the `operatorName` user." }, "allowedFromCidrs": { "type": "array", "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "The CIDR enabled in the security group that can access the bastions in SSH" + "description": "The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source." } }, "required": [ @@ -1922,33 +1961,34 @@ }, "Spec.Kubernetes": { "type": "object", + "description": "Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.", "additionalProperties": false, "properties": { "vpcId": { "$ref": "#/$defs/Types.AwsVpcId", - "description": "This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created." }, "clusterIAMRoleNamePrefixOverride": { "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default IAM role name prefix for the EKS cluster" + "description": "Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name." }, "workersIAMRoleNamePrefixOverride": { "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default IAM role name prefix for the EKS workers" + "description": "Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name." 
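Putting the VPN fields above together, a minimal sketch of the `.spec.infrastructure.vpn` block could look like the following. All values are illustrative placeholders, not defaults; adjust CIDRs, sizes and usernames to your environment:

```yaml
# Hypothetical fragment: one small OpenVPN bastion reachable via SSH from anywhere.
spec:
  infrastructure:
    vpn:
      instances: 1
      instanceType: t3.micro        # AWS EC2 instance type, placeholder
      diskSize: 50                  # GB
      port: 1194
      operatorName: sighup          # OS user created on the bastion
      dhParamsBits: 2048
      vpnClientsSubnetCidr: 172.31.0.0/16
      ssh:
        githubUsersName:
          - my-github-user          # placeholder GitHub username
        allowedFromCidrs:
          - 0.0.0.0/0               # allow SSH from any source
```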
},
"subnetIds": {
"type": "array",
"items": {
"$ref": "#/$defs/Types.AwsSubnetId"
},
- "description": "This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted"
+ "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the IDs of the subnets where the EKS cluster will be created."
},
"apiServer": {
"$ref": "#/$defs/Spec.Kubernetes.APIServer"
},
"serviceIpV4Cidr": {
"$ref": "#/$defs/Types.Cidr",
- "description": "This value defines the CIDR that will be used to assign IP addresses to the services"
+ "description": "This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services."
},
"nodeAllowedSshPublicKey": {
"anyOf": [
@@ -1959,7 +1999,7 @@
"$ref": "#/$defs/Types.FileRef"
}
],
- "description": "This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user"
+ "description": "The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_rsa.pub` file."
},
"nodePoolsLaunchKind": {
"type": "string",
@@ -1968,11 +2008,36 @@
"launch_templates",
"both"
],
- "description": "Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim."
+ "description": "Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting an existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim."
},
"logRetentionDays": {
"type": "integer",
- "description": "Optional Kubernetes Cluster log retention in days. Defaults to 90 days."
+ "description": "Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. Default is `90` days.",
+ "enum": [
+ 0,
+ 1,
+ 3,
+ 5,
+ 7,
+ 14,
+ 30,
+ 60,
+ 90,
+ 120,
+ 150,
+ 180,
+ 365,
+ 400,
+ 545,
+ 731,
+ 1096,
+ 1827,
+ 2192,
+ 2557,
+ 2922,
+ 3288,
+ 3653
+ ]
},
"logsTypes": {
"type": "array",
@@ -2012,7 +2077,7 @@
"properties": {
"privateAccess": {
"type": "boolean",
- "description": "This value defines if the API server will be accessible only from the private subnets"
+ "description": "This value defines if the Kubernetes API server will be accessible from the private subnets. Default is `true`."
},
"privateAccessCidrs": {
"type": "array",
@@ -2020,7 +2085,7 @@
"$ref": "#/$defs/Types.Cidr"
},
"minItems": 0,
- "description": "This value defines the CIDRs that will be allowed to access the API server from the private subnets"
+ "description": "The network CIDRs from the private subnets that will be allowed to access the Kubernetes API server."
},
"publicAccessCidrs": {
"type": "array",
@@ -2028,11 +2093,11 @@
"$ref": "#/$defs/Types.Cidr"
},
"minItems": 0,
- "description": "This value defines the CIDRs that will be allowed to access the API server from the public subnets"
+ "description": "The network CIDRs from the public subnets that will be allowed to access the Kubernetes API server."
},
"publicAccess": {
"type": "boolean",
- "description": "This value defines if the API server will be accessible from the public subnets"
+ "description": "This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`."
} }, "required": [ @@ -2042,6 +2107,7 @@ }, "Spec.Kubernetes.AwsAuth": { "type": "object", + "description": "Optional additional security configuration for EKS IAM via the `aws-auth` configmap.\n\nRef: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html", "additionalProperties": false, "properties": { "additionalAccounts": { @@ -2049,21 +2115,21 @@ "items": { "type": "string" }, - "description": "This optional array defines additional AWS accounts that will be added to the aws-auth configmap" + "description": "This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap." }, "users": { "type": "array", "items": { "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.User" }, - "description": "This optional array defines additional IAM users that will be added to the aws-auth configmap" + "description": "This optional array defines additional IAM users that will be added to the `aws-auth` configmap." }, "roles": { "type": "array", "items": { "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.Role" }, - "description": "This optional array defines additional IAM roles that will be added to the aws-auth configmap" + "description": "This optional array defines additional IAM roles that will be added to the `aws-auth` configmap." } } }, @@ -2116,6 +2182,7 @@ "Spec.Kubernetes.NodePool": { "type": "object", "additionalProperties": false, + "description": "Array with all the node pool definitions that will join the cluster. Each item is an object.", "properties": { "type": { "type": "string", @@ -2126,7 +2193,7 @@ }, "name": { "type": "string", - "description": "The name of the node pool" + "description": "The name of the node pool." }, "ami": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.Ami" @@ -2137,7 +2204,7 @@ "docker", "containerd" ], - "description": "The container runtime to use for the nodes" + "description": "The container runtime to use in the nodes of the node pool. Default is `containerd`." }, "size": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.Size" @@ -2150,26 +2217,26 @@ "items": { "$ref": "#/$defs/Types.AwsArn" }, - "description": "This optional array defines additional target groups to attach to the instances in the node pool" + "description": "This optional array defines additional target groups to attach to the instances in the node pool." }, "labels": { "$ref": "#/$defs/Types.KubeLabels", - "description": "Kubernetes labels that will be added to the nodes" + "description": "Kubernetes labels that will be added to the nodes." }, "taints": { "$ref": "#/$defs/Types.KubeTaints", - "description": "Kubernetes taints that will be added to the nodes" + "description": "Kubernetes taints that will be added to the nodes." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "AWS tags that will be added to the ASG and EC2 instances" + "description": "AWS tags that will be added to the ASG and EC2 instances." }, "subnetIds": { "type": "array", "items": { "$ref": "#/$defs/Types.AwsSubnetId" }, - "description": "This value defines the subnet IDs where the nodes will be created" + "description": "Optional list of subnet IDs where to create the nodes." 
}, "additionalFirewallRules": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRules" @@ -2190,13 +2257,15 @@ }, "type": { "type": "string", + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic.", "enum": [ "ingress", "egress" ] }, "tags": { - "$ref": "#/$defs/Types.AwsTags" + "$ref": "#/$defs/Types.AwsTags", + "description": "Additional AWS tags for the Firewall rule." }, "cidrBlocks": { "type": "array", @@ -2222,6 +2291,7 @@ }, "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": { "type": "object", + "description": "Port range for the Firewall Rule.", "additionalProperties": false, "properties": { "from": { @@ -2242,7 +2312,7 @@ "properties": { "name": { "type": "string", - "description": "The name of the FW rule" + "description": "The name of the Firewall rule." }, "type": { "type": "string", @@ -2250,19 +2320,19 @@ "ingress", "egress" ], - "description": "The type of the FW rule can be ingress or egress" + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "description": "Additional AWS tags for the Firewall rule." }, "self": { "type": "boolean", - "description": "If true, the source will be the security group itself" + "description": "If `true`, the source will be the security group itself." }, "protocol": { "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "description": "The protocol of the Firewall rule." }, "ports": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" @@ -2282,7 +2352,7 @@ "properties": { "name": { "type": "string", - "description": "The name of the FW rule" + "description": "The name for the additional Firewall rule Security Group." }, "type": { "type": "string", @@ -2290,19 +2360,19 @@ "ingress", "egress" ], - "description": "The type of the FW rule can be ingress or egress" + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "description": "Additional AWS tags for the Firewall rule." }, "sourceSecurityGroupId": { "type": "string", - "description": "The source security group ID" + "description": "The source security group ID." }, "protocol": { "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "description": "The protocol of the Firewall rule." }, "ports": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" @@ -2319,6 +2389,7 @@ "Spec.Kubernetes.NodePool.AdditionalFirewallRules": { "type": "object", "additionalProperties": false, + "description": "Optional additional firewall rules that will be attached to the nodes.", "properties": { "cidrBlocks": { "type": "array", @@ -2326,7 +2397,8 @@ "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock" }, "minItems": 1, - "description": "The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored." + "maxItems": 1, + "description": "The CIDR blocks objects definition for the Firewall rule. Even though it is a list, only one item is currently supported. See https://github.com/sighupio/fury-eks-installer/issues/46 for more details." 
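As an example of the `cidrBlocks` firewall rules described above, a hedged sketch of a node pool entry might look like the following. Field names follow the schema; the rule name, source CIDR and ports are placeholders, and the rest of the node pool fields are omitted for brevity:

```yaml
# Hypothetical fragment: allow node-exporter scraping from a monitoring subnet.
spec:
  kubernetes:
    nodePools:
      - name: workers               # placeholder node pool
        additionalFirewallRules:
          cidrBlocks:
            - name: node-exporter-from-monitoring
              type: ingress
              cidrBlocks:
                - 10.100.0.0/16     # placeholder source CIDR
              protocol: TCP
              ports:
                from: 9100
                to: 9100
```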
}, "sourceSecurityGroupId": { "type": "array", @@ -2350,11 +2422,11 @@ "properties": { "id": { "type": "string", - "description": "The AMI ID to use for the nodes" + "description": "Optional. Custom AMI ID to use for the nodes." }, "owner": { "type": "string", - "description": "The owner of the AMI" + "description": "Optional. The owner of the custom AMI." } }, "required": [ @@ -2365,21 +2437,23 @@ "Spec.Kubernetes.NodePool.Instance": { "type": "object", "additionalProperties": false, + "description": "Configuration for the instances that will be used in the node pool.", "properties": { "type": { "type": "string", - "description": "The instance type to use for the nodes" + "description": "The instance type to use for the nodes." }, "spot": { "type": "boolean", - "description": "If true, the nodes will be created as spot instances" + "description": "If `true`, the nodes will be created as spot instances. Default is `false`." }, "volumeSize": { "type": "integer", - "description": "The size of the disk in GB" + "description": "The size of the disk in GB." }, "volumeType": { "type": "string", + "description": "Volume type for the instance disk. Default is `gp2`.", "enum": [ "gp2", "gp3", @@ -2388,7 +2462,8 @@ ] }, "maxPods": { - "type": "integer" + "type": "integer", + "description": "Set the maximum pods per node to a custom value. If not set will use EKS default value that depends on the instance type.\n\nRef: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt" } }, "required": [ @@ -2402,12 +2477,12 @@ "min": { "type": "integer", "minimum": 0, - "description": "The minimum number of nodes in the node pool" + "description": "The minimum number of nodes in the node pool." }, "max": { "type": "integer", "minimum": 0, - "description": "The maximum number of nodes in the node pool" + "description": "The maximum number of nodes in the node pool." } }, "required": [ @@ -2442,6 +2517,7 @@ "Spec.ToolsConfiguration.Terraform.State": { "type": "object", "additionalProperties": false, + "description": "Configuration for storing the Terraform state of the cluster.", "properties": { "s3": { "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State.S3" @@ -2454,22 +2530,23 @@ "Spec.ToolsConfiguration.Terraform.State.S3": { "type": "object", "additionalProperties": false, + "description": "Configuration for the S3 bucket used to store the Terraform state.", "properties": { "bucketName": { "$ref": "#/$defs/Types.AwsS3BucketName", - "description": "This value defines which bucket will be used to store all the states" + "description": "This value defines which bucket will be used to store all the states." }, "keyPrefix": { "$ref": "#/$defs/Types.AwsS3KeyPrefix", - "description": "This value defines which folder will be used to store all the states inside the bucket" + "description": "This value defines which folder will be used to store all the states inside the bucket." }, "region": { "$ref": "#/$defs/Types.AwsRegion", - "description": "This value defines in which region the bucket is located" + "description": "This value defines in which region the bucket is located." }, "skipRegionValidation": { "type": "boolean", - "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region" + "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region." 
} }, "required": [ @@ -2596,7 +2673,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the minio module" + "description": "Set to override the node selector used to place the pods of the package." }, "tolerations": { "type": [ @@ -2606,7 +2683,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cert-manager module" + "description": "Set to override the tolerations that will be added to the pods of the package." } } }, @@ -2616,7 +2693,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the load balancer controller module" + "description": "The node selector to use to place the pods for the load balancer controller module." }, "tolerations": { "type": [ @@ -2626,7 +2703,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cluster autoscaler module" + "description": "The tolerations that will be added to the pods for the cluster autoscaler module." }, "iamRoleName": { "$ref": "#/$defs/Types.AwsIamRoleName" @@ -2635,11 +2712,12 @@ }, "Types.FuryModuleOverrides": { "type": "object", + "description": "Override the common configuration with a particular configuration for the module.", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the dr module" + "description": "Set to override the node selector used to place the pods of the module." }, "tolerations": { "type": [ @@ -2649,7 +2727,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the monitoring module" + "description": "Set to override the tolerations that will be added to the pods of the module." }, "ingresses": { "type": "object", @@ -2665,15 +2743,15 @@ "properties": { "disableAuth": { "type": "boolean", - "description": "If true, the ingress will not have authentication" + "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth." }, "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } } }, @@ -2706,11 +2784,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu request for the prometheus pods" + "description": "The CPU request for the Pod, in cores. Example: `500m`." }, "memory": { "type": "string", - "description": "The memory request for the opensearch pods" + "description": "The memory request for the Pod. Example: `500M`." } } }, @@ -2720,11 +2798,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu limit for the opensearch pods" + "description": "The CPU limit for the Pod. Example: `1000m`." }, "memory": { "type": "string", - "description": "The memory limit for the opensearch pods" + "description": "The memory limit for the Pod. Example: `1G`." 
} } } @@ -2802,7 +2880,7 @@ }, "$schema": "http://json-schema.org/draft-07/schema#", "additionalProperties": false, - "description": "A Fury Cluster deployed through AWS's Elastic Kubernetes Service", + "description": "A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS).", "properties": { "apiVersion": { "type": "string", diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index 49e3379dc..82e4398d4 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "A Fury Cluster deployed through AWS's Elastic Kubernetes Service", + "description": "A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS).", "type": "object", "properties": { "apiVersion": { @@ -34,6 +34,7 @@ "properties": { "name": { "type": "string", + "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.", "minLength": 1, "maxLength": 56 } @@ -48,17 +49,20 @@ "properties": { "distributionVersion": { "type": "string", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "region": { - "$ref": "#/$defs/Types.AwsRegion" + "$ref": "#/$defs/Types.AwsRegion", + "description": "Defines in which AWS region the cluster and all the related resources will be created." }, "tags": { "$ref": "#/$defs/Types.AwsTags", "description": "This map defines which will be the common tags that will be added to all the resources created on AWS." }, "toolsConfiguration": { - "$ref": "#/$defs/Spec.ToolsConfiguration" + "$ref": "#/$defs/Spec.ToolsConfiguration", + "description": "Configuration for tools used by furyctl, like Terraform." }, "infrastructure": { "$ref": "#/$defs/Spec.Infrastructure" @@ -155,6 +159,7 @@ "Spec.ToolsConfiguration.Terraform.State": { "type": "object", "additionalProperties": false, + "description": "Configuration for storing the Terraform state of the cluster.", "properties": { "s3": { "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State.S3" @@ -167,22 +172,23 @@ "Spec.ToolsConfiguration.Terraform.State.S3": { "type": "object", "additionalProperties": false, + "description": "Configuration for the S3 bucket used to store the Terraform state.", "properties": { "bucketName": { "$ref": "#/$defs/Types.AwsS3BucketName", - "description": "This value defines which bucket will be used to store all the states" + "description": "This value defines which bucket will be used to store all the states." }, "keyPrefix": { "$ref": "#/$defs/Types.AwsS3KeyPrefix", - "description": "This value defines which folder will be used to store all the states inside the bucket" + "description": "This value defines which folder will be used to store all the states inside the bucket." }, "region": { "$ref": "#/$defs/Types.AwsRegion", - "description": "This value defines in which region the bucket is located" + "description": "This value defines in which region the bucket is located." 
},
"skipRegionValidation": {
"type": "boolean",
- "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region"
+ "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region."
}
},
"required": [
@@ -196,12 +202,10 @@
"additionalProperties": false,
"properties": {
"vpc": {
- "$ref": "#/$defs/Spec.Infrastructure.Vpc",
- "description": "This key defines the VPC that will be created in AWS"
+ "$ref": "#/$defs/Spec.Infrastructure.Vpc"
},
"vpn": {
- "$ref": "#/$defs/Spec.Infrastructure.Vpn",
- "description": "This section defines the creation of VPN bastions"
+ "$ref": "#/$defs/Spec.Infrastructure.Vpn"
}
},
"allOf": [
@@ -279,6 +283,7 @@
},
"Spec.Infrastructure.Vpc": {
"type": "object",
+ "description": "Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead.",
"additionalProperties": false,
"properties": {
"network": {
@@ -295,7 +300,7 @@
"properties": {
"cidr": {
"$ref": "#/$defs/Types.Cidr",
- "description": "This is the CIDR of the VPC that will be created"
+ "description": "The network CIDR for the VPC that will be created."
},
"subnetsCidrs": {
"$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs"
@@ -308,6 +313,7 @@
},
"Spec.Infrastructure.Vpc.Network.SubnetsCidrs": {
"type": "object",
+ "description": "Network CIDRs configuration for private and public subnets.",
"additionalProperties": false,
"properties": {
"private": {
@@ -315,14 +321,14 @@
"items": {
"$ref": "#/$defs/Types.Cidr"
},
- "description": "These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created"
+ "description": "Network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created."
},
"public": {
"type": "array",
"items": {
"$ref": "#/$defs/Types.Cidr"
},
- "description": "These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created"
+ "description": "Network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created."
}
},
"required": [
@@ -332,50 +338,51 @@
},
"Spec.Infrastructure.Vpn": {
"type": "object",
+ "description": "Configuration for the VPN server instances.",
"additionalProperties": false,
"properties": {
"instances": {
"type": "integer",
- "description": "The number of instances to create, 0 to skip the creation"
+ "description": "The number of VPN server instances to create, `0` to skip the creation."
},
"port": {
"$ref": "#/$defs/Types.TcpPort",
- "description": "The port used by the OpenVPN server"
+ "description": "The port where each OpenVPN server will listen for connections."
},
"instanceType": {
"type": "string",
- "description": "The size of the AWS EC2 instance"
+ "description": "The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3.micro`."
},
"diskSize": {
"type": "integer",
- "description": "The size of the disk in GB"
+ "description": "The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB."
}, "operatorName": { "type": "string", - "description": "The username of the account to create in the bastion's operating system" + "description": "The username of the account to create in the bastion's operating system." }, "dhParamsBits": { "type": "integer", - "description": "The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file" + "description": "The `dhParamsBits` size used for the creation of the .pem file that will be used in the dh openvpn server.conf file." }, "vpnClientsSubnetCidr": { "$ref": "#/$defs/Types.Cidr", - "description": "The CIDR that will be used to assign IP addresses to the VPN clients when connected" + "description": "The network CIDR that will be used to assign IP addresses to the VPN clients when connected." }, "ssh": { "$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh" }, "vpcId": { "$ref": "#/$defs/Types.AwsVpcId", - "description": "The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted." }, "bucketNamePrefix": { "$ref": "#/$defs/Types.AwsS3BucketNamePrefix", - "description": "This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states" + "description": "This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users)." }, "iamUserNameOverride": { "$ref": "#/$defs/Types.AwsIamRoleName", - "description": "Overrides the default IAM user name for the VPN" + "description": "Overrides IAM user name for the VPN. Default is to use the cluster name." } }, "required": [ @@ -399,7 +406,7 @@ } ] }, - "description": "This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented" + "description": "**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system." }, "githubUsersName": { "type": "array", @@ -407,14 +414,14 @@ "type": "string" }, "minItems": 1, - "description": "The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user" + "description": "List of GitHub usernames from whom get their SSH public key and add as authorized keys of the `operatorName` user." }, "allowedFromCidrs": { "type": "array", "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "The CIDR enabled in the security group that can access the bastions in SSH" + "description": "The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source." } }, "required": [ @@ -424,33 +431,34 @@ }, "Spec.Kubernetes": { "type": "object", + "description": "Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.", "additionalProperties": false, "properties": { "vpcId": { "$ref": "#/$defs/Types.AwsVpcId", - "description": "This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created." 
}, "clusterIAMRoleNamePrefixOverride": { "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default IAM role name prefix for the EKS cluster" + "description": "Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name." }, "workersIAMRoleNamePrefixOverride": { "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default IAM role name prefix for the EKS workers" + "description": "Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name." }, "subnetIds": { "type": "array", "items": { "$ref": "#/$defs/Types.AwsSubnetId" }, - "description": "This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the subnet where the EKS cluster will be created." }, "apiServer": { "$ref": "#/$defs/Spec.Kubernetes.APIServer" }, "serviceIpV4Cidr": { "$ref": "#/$defs/Types.Cidr", - "description": "This value defines the CIDR that will be used to assign IP addresses to the services" + "description": "This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services." }, "nodeAllowedSshPublicKey": { "anyOf": [ @@ -461,7 +469,7 @@ "$ref": "#/$defs/Types.FileRef" } ], - "description": "This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user" + "description": "The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_ras.pub` file." }, "nodePoolsLaunchKind": { "type": "string", @@ -470,11 +478,36 @@ "launch_templates", "both" ], - "description": "Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." + "description": "Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." }, "logRetentionDays": { "type": "integer", - "description": "Optional Kubernetes Cluster log retention in days. Defaults to 90 days." + "description": "Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. Default is `90` days.", + "enum": [ + 0, + 1, + 3, + 5, + 7, + 14, + 30, + 60, + 90, + 120, + 150, + 180, + 365, + 400, + 545, + 731, + 1096, + 1827, + 2192, + 2557, + 2922, + 3288, + 3653 + ] }, "logsTypes": { "type": "array", @@ -514,7 +547,7 @@ "properties": { "privateAccess": { "type": "boolean", - "description": "This value defines if the API server will be accessible only from the private subnets" + "description": "This value defines if the Kubernetes API server will be accessible from the private subnets. Default it `true`." }, "privateAccessCidrs": { "type": "array", @@ -522,7 +555,7 @@ "$ref": "#/$defs/Types.Cidr" }, "minItems": 0, - "description": "This value defines the CIDRs that will be allowed to access the API server from the private subnets" + "description": "The network CIDRs from the private subnets that will be allowed access the Kubernetes API server." 
}, "publicAccessCidrs": { "type": "array", @@ -530,11 +563,11 @@ "$ref": "#/$defs/Types.Cidr" }, "minItems": 0, - "description": "This value defines the CIDRs that will be allowed to access the API server from the public subnets" + "description": "The network CIDRs from the public subnets that will be allowed access the Kubernetes API server." }, "publicAccess": { "type": "boolean", - "description": "This value defines if the API server will be accessible from the public subnets" + "description": "This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`." } }, "required": [ @@ -545,6 +578,7 @@ "Spec.Kubernetes.NodePool": { "type": "object", "additionalProperties": false, + "description": "Array with all the node pool definitions that will join the cluster. Each item is an object.", "properties": { "type": { "type": "string", @@ -555,7 +589,7 @@ }, "name": { "type": "string", - "description": "The name of the node pool" + "description": "The name of the node pool." }, "ami": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.Ami" @@ -566,7 +600,7 @@ "docker", "containerd" ], - "description": "The container runtime to use for the nodes" + "description": "The container runtime to use in the nodes of the node pool. Default is `containerd`." }, "size": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.Size" @@ -579,26 +613,26 @@ "items": { "$ref": "#/$defs/Types.AwsArn" }, - "description": "This optional array defines additional target groups to attach to the instances in the node pool" + "description": "This optional array defines additional target groups to attach to the instances in the node pool." }, "labels": { "$ref": "#/$defs/Types.KubeLabels", - "description": "Kubernetes labels that will be added to the nodes" + "description": "Kubernetes labels that will be added to the nodes." }, "taints": { "$ref": "#/$defs/Types.KubeTaints", - "description": "Kubernetes taints that will be added to the nodes" + "description": "Kubernetes taints that will be added to the nodes." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "AWS tags that will be added to the ASG and EC2 instances" + "description": "AWS tags that will be added to the ASG and EC2 instances." }, "subnetIds": { "type": "array", "items": { "$ref": "#/$defs/Types.AwsSubnetId" }, - "description": "This value defines the subnet IDs where the nodes will be created" + "description": "Optional list of subnet IDs where to create the nodes." }, "additionalFirewallRules": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRules" @@ -616,11 +650,11 @@ "properties": { "id": { "type": "string", - "description": "The AMI ID to use for the nodes" + "description": "Optional. Custom AMI ID to use for the nodes." }, "owner": { "type": "string", - "description": "The owner of the AMI" + "description": "Optional. The owner of the custom AMI." } }, "required": [ @@ -631,21 +665,23 @@ "Spec.Kubernetes.NodePool.Instance": { "type": "object", "additionalProperties": false, + "description": "Configuration for the instances that will be used in the node pool.", "properties": { "type": { "type": "string", - "description": "The instance type to use for the nodes" + "description": "The instance type to use for the nodes." }, "spot": { "type": "boolean", - "description": "If true, the nodes will be created as spot instances" + "description": "If `true`, the nodes will be created as spot instances. Default is `false`." 
}, "volumeSize": { "type": "integer", - "description": "The size of the disk in GB" + "description": "The size of the disk in GB." }, "volumeType": { "type": "string", + "description": "Volume type for the instance disk. Default is `gp2`.", "enum": [ "gp2", "gp3", @@ -654,7 +690,8 @@ ] }, "maxPods": { - "type": "integer" + "type": "integer", + "description": "Set the maximum pods per node to a custom value. If not set will use EKS default value that depends on the instance type.\n\nRef: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt" } }, "required": [ @@ -668,12 +705,12 @@ "min": { "type": "integer", "minimum": 0, - "description": "The minimum number of nodes in the node pool" + "description": "The minimum number of nodes in the node pool." }, "max": { "type": "integer", "minimum": 0, - "description": "The maximum number of nodes in the node pool" + "description": "The maximum number of nodes in the node pool." } }, "required": [ @@ -684,6 +721,7 @@ "Spec.Kubernetes.NodePool.AdditionalFirewallRules": { "type": "object", "additionalProperties": false, + "description": "Optional additional firewall rules that will be attached to the nodes.", "properties": { "cidrBlocks": { "type": "array", @@ -691,7 +729,8 @@ "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock" }, "minItems": 1, - "description": "The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored." + "maxItems": 1, + "description": "The CIDR blocks objects definition for the Firewall rule. Even though it is a list, only one item is currently supported. See https://github.com/sighupio/fury-eks-installer/issues/46 for more details." }, "sourceSecurityGroupId": { "type": "array", @@ -718,13 +757,15 @@ }, "type": { "type": "string", + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic.", "enum": [ "ingress", "egress" ] }, "tags": { - "$ref": "#/$defs/Types.AwsTags" + "$ref": "#/$defs/Types.AwsTags", + "description": "Additional AWS tags for the Firewall rule." }, "cidrBlocks": { "type": "array", @@ -754,7 +795,7 @@ "properties": { "name": { "type": "string", - "description": "The name of the FW rule" + "description": "The name for the additional Firewall rule Security Group." }, "type": { "type": "string", @@ -762,19 +803,19 @@ "ingress", "egress" ], - "description": "The type of the FW rule can be ingress or egress" + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "description": "Additional AWS tags for the Firewall rule." }, "sourceSecurityGroupId": { "type": "string", - "description": "The source security group ID" + "description": "The source security group ID." }, "protocol": { "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "description": "The protocol of the Firewall rule." }, "ports": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" @@ -794,7 +835,7 @@ "properties": { "name": { "type": "string", - "description": "The name of the FW rule" + "description": "The name of the Firewall rule." 
}, "type": { "type": "string", @@ -802,19 +843,19 @@ "ingress", "egress" ], - "description": "The type of the FW rule can be ingress or egress" + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "description": "Additional AWS tags for the Firewall rule." }, "self": { "type": "boolean", - "description": "If true, the source will be the security group itself" + "description": "If `true`, the source will be the security group itself." }, "protocol": { "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "description": "The protocol of the Firewall rule." }, "ports": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" @@ -830,6 +871,7 @@ }, "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": { "type": "object", + "description": "Port range for the Firewall Rule.", "additionalProperties": false, "properties": { "from": { @@ -846,6 +888,7 @@ }, "Spec.Kubernetes.AwsAuth": { "type": "object", + "description": "Optional additional security configuration for EKS IAM via the `aws-auth` configmap.\n\nRef: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html", "additionalProperties": false, "properties": { "additionalAccounts": { @@ -853,21 +896,21 @@ "items": { "type": "string" }, - "description": "This optional array defines additional AWS accounts that will be added to the aws-auth configmap" + "description": "This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap." }, "users": { "type": "array", "items": { "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.User" }, - "description": "This optional array defines additional IAM users that will be added to the aws-auth configmap" + "description": "This optional array defines additional IAM users that will be added to the `aws-auth` configmap." }, "roles": { "type": "array", "items": { "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.Role" }, - "description": "This optional array defines additional IAM roles that will be added to the aws-auth configmap" + "description": "This optional array defines additional IAM roles that will be added to the `aws-auth` configmap." } } }, @@ -1004,28 +1047,29 @@ "Spec.Distribution.Common": { "type": "object", "additionalProperties": false, + "description": "Common configuration for all the distribution modules.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for all the KFD modules" + "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```" }, "provider": { "$ref": "#/$defs/Spec.Distribution.Common.Provider" }, "relativeVendorPath": { "type": "string", - "description": "The relative path to the vendor directory, does not need to be changed" + "description": "The relative path to the vendor directory, does not need to be changed." 
}, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`)." } } }, @@ -1035,7 +1079,7 @@ "properties": { "type": { "type": "string", - "description": "The type of the provider, must be EKS if specified" + "description": "The provider type. Don't set. FOR INTERNAL USE ONLY." } }, "required": [ @@ -1090,14 +1134,15 @@ }, "baseDomain": { "type": "string", - "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" + "description": "The base domain used for all the KFD ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone." }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller module" + "description": "Configurations for the Ingress nginx controller package." }, "certManager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses." }, "dns": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS" @@ -1135,20 +1180,21 @@ "Spec.Distribution.Modules.Ingress.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Ingress module.", "properties": { "ingresses": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the ingress module" + "description": "Set to override the node selector used to place the pods of the Ingress module." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the ingress module" + "description": "Set to override the tolerations that will be added to the pods of the Ingress module." } } }, @@ -1181,7 +1227,7 @@ "single", "dual" ], - "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" + "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type.\n\nDefault is `single`." 
}, "tls": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" @@ -1205,7 +1251,7 @@ "secret", "none" ], - "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" + "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`." }, "secret": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" @@ -1230,16 +1276,19 @@ "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { "type": "object", "additionalProperties": false, + "description": "Kubernetes TLS secret for the ingresses TLS certificate.", "properties": { "cert": { "type": "string", - "description": "The certificate file content or you can use the file notation to get the content from a file" + "description": "The certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "key": { - "type": "string" + "type": "string", + "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "ca": { - "type": "string" + "type": "string", + "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." } }, "required": [ @@ -1251,6 +1300,7 @@ "Spec.Distribution.Modules.Ingress.CertManager": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.", "properties": { "clusterIssuer": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" @@ -1266,15 +1316,16 @@ "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.", "properties": { "name": { "type": "string", - "description": "The name of the cluster issuer" + "description": "Name of the clusterIssuer." }, "email": { "type": "string", "format": "email", - "description": "The email of the cluster issuer" + "description": "The email address to use during the certificate issuing process." }, "type": { "type": "string", @@ -1282,11 +1333,11 @@ "dns01", "http01" ], - "description": "The type of the cluster issuer, must be ***dns01*** or ***http01***" + "description": "The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge." }, "solvers": { "type": "array", - "description": "The custom solvers configurations" + "description": "List of challenge solvers to use instead of the default one for the `http01` challenge." } }, "required": [ @@ -1308,6 +1359,7 @@ }, "Spec.Distribution.Modules.Ingress.DNS": { "type": "object", + "description": "DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission.", "additionalProperties": false, "properties": { "public": { @@ -1331,11 +1383,11 @@ "properties": { "name": { "type": "string", - "description": "The name of the public hosted zone" + "description": "The name of the public hosted zone." }, "create": { "type": "boolean", - "description": "If true, the public hosted zone will be created" + "description": "By default, a Terraform data source will be used to get the public DNS zone. 
Set to `true` to create the public zone instead."
} }, "required": [ @@ -1345,15 +1397,16 @@ }, "Spec.Distribution.Modules.Ingress.DNS.Private": { "type": "object",
+ "description": "The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone.",
"additionalProperties": false, "properties": { "name": { "type": "string",
- "description": "The name of the private hosted zone"
+ "description": "The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`."
}, "create": { "type": "boolean",
- "description": "If true, the private hosted zone will be created"
+ "description": "By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead."
} }, "required": [ @@ -1364,6 +1417,7 @@ "Spec.Distribution.Modules.Logging": { "type": "object", "additionalProperties": false,
+ "description": "Configuration for the Logging module.",
"properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" }, @@ -1376,7 +1430,7 @@ "loki", "customOutputs" ],
- "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage."
+ "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`."
}, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" }, @@ -1441,14 +1495,14 @@ "single", "triple" ],
- "description": "The type of the opensearch, must be ***single*** or ***triple***"
+ "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment."
}, "resources": { "$ref": "#/$defs/Types.KubeResources" }, "storageSize": { "type": "string",
- "description": "The storage size for the opensearch pods"
+ "description": "The storage size for the OpenSearch volumes."
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } @@ -1460,6 +1514,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object",
+ "description": "DEPRECATED since KFD v1.26.6, v1.27.5, v1.28.0.",
"additionalProperties": false, "properties": { "overrides": { @@ -1470,10 +1525,11 @@ "Spec.Distribution.Modules.Logging.Minio": { "type": "object", "additionalProperties": false,
+ "description": "Configuration for Logging's MinIO deployment.",
"properties": { "storageSize": { "type": "string",
- "description": "The PVC size for each minio disk, 6 disks total"
+ "description": "The PVC size for each MinIO disk, 6 disks total."
}, "rootUser": { "type": "object", @@ -1481,11 +1537,11 @@ "properties": { "username": { "type": "string", - "description": "The username of the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password of the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1496,10 +1552,12 @@ }, "Spec.Distribution.Modules.Logging.Loki": { "type": "object", + "description": "Configuration for the Loki package.", "additionalProperties": false, "properties": { "backend": { "type": "string", + "description": "The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.", "enum": [ "minio", "externalEndpoint" @@ -1508,26 +1566,27 @@ "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Loki's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the loki external endpoint" + "description": "External S3-compatible endpoint for Loki's storage." }, "insecure": { "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the loki external endpoint" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the loki external endpoint" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the loki external endpoint" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1539,6 +1598,7 @@ "Spec.Distribution.Modules.Logging.Operator": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging Operator.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1546,41 +1606,41 @@ } }, "Spec.Distribution.Modules.Logging.CustomOutputs": { - "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.", "type": "object", "additionalProperties": false, "properties": { "audit": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}`" }, "events": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "infra": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "ingressNginx": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "kubernetes": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdCommon": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdEtcd": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}`"
}, "errors": { "type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
} }, "required": [ @@ -1597,7 +1657,7 @@ "Spec.Distribution.Modules.Monitoring": { "type": "object", "additionalProperties": false,
- "description": "configuration for the Monitoring module components",
+ "description": "Configuration for the Monitoring module.",
"properties": { "type": { "type": "string", @@ -1607,7 +1667,7 @@ "prometheusAgent", "mimir" ],
- "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage."
+ "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus Operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`."
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" } @@ -1653,15 +1713,15 @@ }, "retentionTime": { "type": "string",
- "description": "The retention time for the k8s Prometheus instance."
+ "description": "The retention time for the `k8s` Prometheus instance."
}, "retentionSize": { "type": "string",
- "description": "The retention size for the k8s Prometheus instance."
+ "description": "The retention size for the `k8s` Prometheus instance."
}, "storageSize": { "type": "string",
- "description": "The storage size for the k8s Prometheus instance."
+ "description": "The storage size for the `k8s` Prometheus instance." }, "remoteWrite": { "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", @@ -1694,15 +1754,15 @@ "properties": { "deadManSwitchWebhookUrl": { "type": "string", - "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io" + "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io." }, "installDefaultRules": { "type": "boolean", - "description": "If true, the default rules will be installed" + "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution." }, "slackWebhookUrl": { "type": "string", - "description": "The slack webhook url to send alerts" + "description": "The Slack webhook URL where to send the infrastructural and workload alerts to." } } }, @@ -1753,10 +1813,11 @@ "Spec.Distribution.Modules.Monitoring.Mimir": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Mimir package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the mimir pods" + "description": "The retention time for the logs stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days." }, "backend": { "type": "string", @@ -1764,31 +1825,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Mimir's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external mimir backend" + "description": "External S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external mimir backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external mimir backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external mimir backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external mimir backend" + "description": "The bucket name of the external S3-compatible object storage." 
} } }, @@ -1799,11 +1861,12 @@ }, "Spec.Distribution.Modules.Monitoring.Minio": { "type": "object", + "description": "Configuration for Monitoring's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1811,11 +1874,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1827,6 +1890,7 @@ "Spec.Distribution.Modules.Tracing": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tracing module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1837,7 +1901,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either ***none*** or ***tempo***" + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -1853,10 +1917,11 @@ "Spec.Distribution.Modules.Tracing.Tempo": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tempo package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the tempo pods" + "description": "The retention time for the traces stored in Tempo." }, "backend": { "type": "string", @@ -1864,31 +1929,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { + "description": "Configuration for Tempo's external storage backend.", "type": "object", "additionalProperties": false, "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external tempo backend" + "description": "External S3-compatible endpoint for Tempo's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external tempo backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external tempo backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external tempo backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external tempo backend" + "description": "The bucket name of the external S3-compatible object storage." 
} } }, @@ -1899,11 +1965,12 @@ }, "Spec.Distribution.Modules.Tracing.Minio": { "type": "object",
+ "description": "Configuration for Tracing's MinIO deployment.",
"additionalProperties": false, "properties": { "storageSize": { "type": "string",
- "description": "The storage size for the minio pods"
+ "description": "The PVC size for each MinIO disk, 6 disks total."
}, "rootUser": { "type": "object", @@ -1911,11 +1978,11 @@ "properties": { "username": { "type": "string",
- "description": "The username for the minio root user"
+ "description": "The username for the default MinIO root user."
}, "password": { "type": "string",
- "description": "The password for the minio root user"
+ "description": "The password for the default MinIO root user."
} } }, @@ -1927,9 +1994,10 @@ "Spec.Distribution.Modules.Networking": { "type": "object", "additionalProperties": false,
+ "description": "Configuration for the Networking module.",
"properties": { "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+ "$ref": "#/$defs/Types.FuryModuleOverrides"
}, "tigeraOperator": { "$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator" } @@ -1948,6 +2016,7 @@ "Spec.Distribution.Modules.Policy": { "type": "object", "additionalProperties": false,
+ "description": "Configuration for the Policy module.",
"properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" }, @@ -1959,7 +2028,7 @@ "gatekeeper", "kyverno" ],
- "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***"
+ "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`."
}, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" } @@ -2005,6 +2074,7 @@ "Spec.Distribution.Modules.Policy.Gatekeeper": { "type": "object", "additionalProperties": false,
+ "description": "Configuration for the Gatekeeper package.",
"properties": { "additionalExcludedNamespaces": { "type": "array", @@ -2020,11 +2090,11 @@ "dryrun", "warn" ],
- "description": "The enforcement action to use for the gatekeeper module"
+ "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations of the policies are found, `warn` will show a message to the user but will admit the violating requests, and `dryrun` won't give any feedback to the user but it will log the violations."
}, "installDefaultPolicies": { "type": "boolean",
- "description": "If true, the default policies will be installed"
+ "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraint templates and constraints) included with the distribution."
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } @@ -2038,13 +2108,14 @@ "Spec.Distribution.Modules.Policy.Kyverno": { "type": "object", "additionalProperties": false,
+ "description": "Configuration for the Kyverno package.",
"properties": { "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" },
- "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them."
+ "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them."
}, "validationFailureAction": { "type": "string", @@ -2052,11 +2123,11 @@ "Audit", "Enforce" ], - "description": "The validation failure action to use for the kyverno module" + "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Kyverno policies included with distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -2070,6 +2141,7 @@ "Spec.Distribution.Modules.Dr": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Disaster Recovery module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -2080,7 +2152,7 @@ "none", "eks" ], - "description": "The type of the DR, must be ***none*** or ***eks***" + "description": "The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups.\n\nDefault is `none`." }, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -2154,12 +2226,12 @@ "properties": { "region": { "$ref": "#/$defs/Types.AwsRegion", - "description": "The region where the velero bucket is located" + "description": "The region where the bucket for Velero will be located." }, "bucketName": { "$ref": "#/$defs/Types.AwsS3BucketName", "maxLength": 49, - "description": "The name of the velero bucket" + "description": "The name of the bucket for Velero." } }, "required": [ @@ -2170,6 +2242,7 @@ "Spec.Distribution.Modules.Auth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Auth module.", "properties": { "overrides": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" @@ -2179,7 +2252,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain for the auth module" + "description": "Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class." }, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" @@ -2258,10 +2331,11 @@ "Spec.Distribution.Modules.Auth.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Auth module.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the auth module" + "description": "Set to override the node selector used to place the pods of the Auth module." }, "tolerations": { "type": [ @@ -2271,10 +2345,11 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the auth module" + "description": "Set to override the tolerations that will be added to the pods of the Auth module." }, "ingresses": { "type": "object", + "description": "Override the definition of the Auth module ingresses.", "additionalProperties": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" } @@ -2287,11 +2362,11 @@ "properties": { "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." 
}, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } }, "required": [ @@ -2310,7 +2385,7 @@ "basicAuth", "sso" ], - "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." }, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -2323,14 +2398,15 @@ "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the HTTP Basic Auth provider.", "properties": { "username": { "type": "string", - "description": "The username for the basic auth" + "description": "The username for logging in with the HTTP basic authentication." }, "password": { "type": "string", - "description": "The password for the basic auth" + "description": "The password for logging in with the HTTP basic authentication." } }, "required": [ @@ -2344,14 +2420,15 @@ "Spec.Distribution.Modules.Auth.Dex": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Dex package.", "properties": { "connectors": { "type": "array", - "description": "The connectors for dex" + "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/" }, "additionalStaticClients": { "type": "array", - "description": "The additional static clients for dex" + "description": "Additional static clients defitions that will be added to the default clients included with the distribution in Dex's configuration. Example:\n\n```yaml\nadditionalStaticClients:\n - id: my-custom-client\n name: \"A custom additional static client\"\n redirectURIs:\n - \"https://myapp.tld/redirect\"\n - \"https://alias.tld/oidc-callback\"\n secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/" }, "expiry": { "type": "object", @@ -2630,11 +2707,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu request for the prometheus pods" + "description": "The CPU request for the Pod, in cores. Example: `500m`." }, "memory": { "type": "string", - "description": "The memory request for the opensearch pods" + "description": "The memory request for the Pod. Example: `500M`." } } }, @@ -2644,11 +2721,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu limit for the opensearch pods" + "description": "The CPU limit for the Pod. Example: `1000m`." }, "memory": { "type": "string", - "description": "The memory limit for the opensearch pods" + "description": "The memory limit for the Pod. Example: `1G`." 
} } } @@ -2656,11 +2733,12 @@ }, "Types.FuryModuleOverrides": { "type": "object", + "description": "Override the common configuration with a particular configuration for the module.", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the dr module" + "description": "Set to override the node selector used to place the pods of the module." }, "tolerations": { "type": [ @@ -2670,7 +2748,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the monitoring module" + "description": "Set to override the tolerations that will be added to the pods of the module." }, "ingresses": { "type": "object", @@ -2686,7 +2764,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the minio module" + "description": "Set to override the node selector used to place the pods of the package." }, "tolerations": { "type": [ @@ -2696,7 +2774,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cert-manager module" + "description": "Set to override the tolerations that will be added to the pods of the package." } } }, @@ -2706,7 +2784,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the load balancer controller module" + "description": "The node selector to use to place the pods for the load balancer controller module." }, "tolerations": { "type": [ @@ -2716,7 +2794,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cluster autoscaler module" + "description": "The tolerations that will be added to the pods for the cluster autoscaler module." }, "iamRoleName": { "$ref": "#/$defs/Types.AwsIamRoleName" @@ -2729,15 +2807,15 @@ "properties": { "disableAuth": { "type": "boolean", - "description": "If true, the ingress will not have authentication" + "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth." }, "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } } } diff --git a/schemas/public/kfddistribution-kfd-v1alpha2.json b/schemas/public/kfddistribution-kfd-v1alpha2.json index 3e4451b36..48018b711 100644 --- a/schemas/public/kfddistribution-kfd-v1alpha2.json +++ b/schemas/public/kfddistribution-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "", + "description": "KFD modules deployed on top of an existing Kubernetes cluster.", "type": "object", "properties": { "apiVersion": { @@ -34,6 +34,7 @@ "properties": { "name": { "type": "string", + "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.", "minLength": 1, "maxLength": 56 } @@ -48,6 +49,7 @@ "properties": { "distributionVersion": { "type": "string", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. 
It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "distribution": { @@ -68,7 +70,7 @@ "properties": { "kubeconfig": { "type": "string", - "description": "The kubeconfig file path" + "description": "The path to the kubeconfig file." }, "common": { "$ref": "#/$defs/Spec.Distribution.Common" @@ -134,28 +136,29 @@ "Spec.Distribution.Common": { "type": "object", "additionalProperties": false, + "description": "Common configuration for all the distribution modules.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for all the KFD modules" + "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```" }, "provider": { "$ref": "#/$defs/Spec.Distribution.Common.Provider" }, "relativeVendorPath": { "type": "string", - "description": "The relative path to the vendor directory, does not need to be changed" + "description": "The relative path to the vendor directory, does not need to be changed." }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." } } }, @@ -165,7 +168,7 @@ "properties": { "type": { "type": "string", - "description": "The type of the provider" + "description": "The provider type. Don't set. FOR INTERNAL USE ONLY." } }, "required": [ @@ -217,14 +220,15 @@ }, "baseDomain": { "type": "string", - "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" + "description": "The base domain used for all the KFD infrastructural ingresses. If using the nginx dual type, this value should be the same as the domain associated with the `internal` ingress class." }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller module" + "description": "Configurations for the Ingress nginx controller package." }, "certManager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses." 
},
        "forecastle": {
          "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Forecastle"
@@ -258,20 +262,21 @@
     "Spec.Distribution.Modules.Ingress.Overrides": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Override the common configuration with a particular configuration for the Ingress module.",
       "properties": {
         "ingresses": {
           "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses"
         },
         "nodeSelector": {
           "$ref": "#/$defs/Types.KubeNodeSelector",
-          "description": "The node selector to use to place the pods for the ingress module"
+          "description": "Set to override the node selector used to place the pods of the Ingress module."
         },
         "tolerations": {
           "type": "array",
           "items": {
             "$ref": "#/$defs/Types.KubeToleration"
           },
-          "description": "The tolerations that will be added to the pods for the ingress module"
+          "description": "Set to override the tolerations that will be added to the pods of the Ingress module."
         }
       }
     },
@@ -304,7 +309,7 @@
             "single",
             "dual"
           ],
-          "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***"
+          "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.\n\nDefault is `single`."
         },
         "tls": {
           "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS"
@@ -328,7 +333,7 @@
             "secret",
             "none"
           ],
-          "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***"
+          "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`."
         },
         "secret": {
           "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret"
@@ -353,16 +358,19 @@
     "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Kubernetes TLS secret for the ingresses TLS certificate.",
       "properties": {
         "cert": {
           "type": "string",
-          "description": "The certificate file content or you can use the file notation to get the content from a file"
+          "description": "The certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file."
         },
         "key": {
-          "type": "string"
+          "type": "string",
+          "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file."
        },
         "ca": {
-          "type": "string"
+          "type": "string",
+          "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file."
         }
       },
       "required": [
@@ -374,6 +382,7 @@
     "Spec.Distribution.Modules.Ingress.CertManager": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Configuration for the cert-manager package. 
Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.",
       "properties": {
         "clusterIssuer": {
           "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer"
@@ -389,26 +398,27 @@
     "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.",
       "properties": {
         "name": {
           "type": "string",
-          "description": "The name of the cluster issuer"
+          "description": "Name of the clusterIssuer."
         },
         "email": {
           "type": "string",
           "format": "email",
-          "description": "The email of the cluster issuer"
+          "description": "The email address to use during the certificate issuing process."
         },
         "type": {
           "type": "string",
           "enum": [
             "http01"
           ],
-          "description": "The type of the cluster issuer, must be ***http01***"
+          "description": "The type of the clusterIssuer. Only `http01` challenge is supported for KFDDistribution kind. See solvers for arbitrary configurations."
         },
         "solvers": {
           "type": "array",
-          "description": "The custom solvers configurations"
+          "description": "List of challenge solvers to use instead of the default one for the `http01` challenge."
         }
       },
       "required": [
@@ -431,6 +441,7 @@
     "Spec.Distribution.Modules.Logging": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Configuration for the Logging module.",
       "properties": {
         "overrides": {
           "$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -443,7 +454,7 @@
             "loki",
             "customOutputs"
           ],
-          "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage."
+          "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`."
         },
         "opensearch": {
           "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch"
@@ -508,14 +519,14 @@
             "single",
             "triple"
           ],
-          "description": "The type of the opensearch, must be ***single*** or ***triple***"
+          "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment."
         },
         "resources": {
           "$ref": "#/$defs/Types.KubeResources"
         },
         "storageSize": {
           "type": "string",
-          "description": "The storage size for the opensearch pods"
+          "description": "The storage size for the OpenSearch volumes."
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -527,6 +538,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", + "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.", "additionalProperties": false, "properties": { "overrides": { @@ -537,10 +549,11 @@ "Spec.Distribution.Modules.Logging.Minio": { "type": "object", "additionalProperties": false, + "description": "Configuration for Logging's MinIO deployment.", "properties": { "storageSize": { "type": "string", - "description": "The PVC size for each minio disk, 6 disks total" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -548,11 +561,11 @@ "properties": { "username": { "type": "string", - "description": "The username of the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password of the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -563,10 +576,12 @@ }, "Spec.Distribution.Modules.Logging.Loki": { "type": "object", + "description": "Configuration for the Loki package.", "additionalProperties": false, "properties": { "backend": { "type": "string", + "description": "The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.", "enum": [ "minio", "externalEndpoint" @@ -575,26 +590,27 @@ "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Loki's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the loki external endpoint" + "description": "External S3-compatible endpoint for Loki's storage." }, "insecure": { "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the loki external endpoint" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the loki external endpoint" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the loki external endpoint" + "description": "The bucket name of the external S3-compatible object storage." 
} } }, @@ -606,6 +622,7 @@ "Spec.Distribution.Modules.Logging.Operator": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging Operator.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -613,41 +630,41 @@ } }, "Spec.Distribution.Modules.Logging.CustomOutputs": { - "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.", "type": "object", "additionalProperties": false, "properties": { "audit": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "events": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "infra": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "ingressNginx": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "kubernetes": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." 
+ "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdCommon": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdEtcd": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "errors": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" } }, "required": [ @@ -664,7 +681,7 @@ "Spec.Distribution.Modules.Monitoring": { "type": "object", "additionalProperties": false, - "description": "configuration for the Monitoring module components", + "description": "Configuration for the Monitoring module.", "properties": { "type": { "type": "string", @@ -674,7 +691,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." 
+          "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`."
         },
         "overrides": {
           "$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -720,15 +737,15 @@
         },
         "retentionTime": {
           "type": "string",
-          "description": "The retention time for the K8s Prometheus instance."
+          "description": "The retention time for the `k8s` Prometheus instance."
         },
         "retentionSize": {
           "type": "string",
-          "description": "The retention size for the k8s Prometheus instance."
+          "description": "The retention size for the `k8s` Prometheus instance."
         },
         "storageSize": {
           "type": "string",
-          "description": "The storage size for the k8s Prometheus instance."
+          "description": "The storage size for the `k8s` Prometheus instance."
         },
         "remoteWrite": {
           "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).",
@@ -761,15 +778,15 @@
         "properties": {
           "deadManSwitchWebhookUrl": {
             "type": "string",
-            "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io"
+            "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io."
           },
           "installDefaultRules": {
             "type": "boolean",
-            "description": "If true, the default rules will be installed"
+            "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution."
           },
           "slackWebhookUrl": {
             "type": "string",
-            "description": "The slack webhook url to send alerts"
+            "description": "The Slack webhook URL where to send the infrastructural and workload alerts to."
           }
         }
       },
@@ -820,10 +837,11 @@
     "Spec.Distribution.Modules.Monitoring.Mimir": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Configuration for the Mimir package.",
       "properties": {
         "retentionTime": {
           "type": "string",
-          "description": "The retention time for the mimir pods"
+          "description": "The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days."
         },
         "backend": {
           "type": "string",
           "enum": [
             "minio",
             "externalEndpoint"
           ],
-          "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***"
+          "description": "The storage backend type for Mimir. 
`minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Mimir's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external mimir backend" + "description": "External S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external mimir backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external mimir backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external mimir backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external mimir backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -866,11 +885,12 @@ }, "Spec.Distribution.Modules.Monitoring.Minio": { "type": "object", + "description": "Configuration for Monitoring's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -878,11 +898,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -894,6 +914,7 @@ "Spec.Distribution.Modules.Tracing": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tracing module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -904,7 +925,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either ***none*** or ***tempo***" + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -920,10 +941,11 @@ "Spec.Distribution.Modules.Tracing.Tempo": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tempo package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the tempo pods" + "description": "The retention time for the traces stored in Tempo." }, "backend": { "type": "string", @@ -931,31 +953,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." 
},
         "externalEndpoint": {
+          "description": "Configuration for Tempo's external storage backend.",
           "type": "object",
           "additionalProperties": false,
           "properties": {
             "endpoint": {
               "type": "string",
-              "description": "The endpoint of the external tempo backend"
+              "description": "External S3-compatible endpoint for Tempo's storage."
             },
             "insecure": {
               "type": "boolean",
-              "description": "If true, the external tempo backend will not use tls"
+              "description": "If true, will use HTTP as protocol instead of HTTPS."
             },
             "secretAccessKey": {
               "type": "string",
-              "description": "The secret access key of the external tempo backend"
+              "description": "The secret access key (password) for the external S3-compatible bucket."
             },
             "accessKeyId": {
               "type": "string",
-              "description": "The access key id of the external tempo backend"
+              "description": "The access key ID (username) for the external S3-compatible bucket."
             },
             "bucketName": {
               "type": "string",
-              "description": "The bucket name of the external tempo backend"
+              "description": "The bucket name of the external S3-compatible object storage."
             }
           }
         },
@@ -966,11 +989,12 @@
     "Spec.Distribution.Modules.Tracing.Minio": {
       "type": "object",
+      "description": "Configuration for Tracing's MinIO deployment.",
       "additionalProperties": false,
       "properties": {
         "storageSize": {
           "type": "string",
-          "description": "The storage size for the minio pods"
+          "description": "The PVC size for each MinIO disk, 6 disks total."
         },
         "rootUser": {
           "type": "object",
           "properties": {
             "username": {
               "type": "string",
-              "description": "The username for the minio root user"
+              "description": "The username for the default MinIO root user."
             },
             "password": {
               "type": "string",
-              "description": "The password for the minio root user"
+              "description": "The password for the default MinIO root user."
             }
           }
         },
@@ -994,6 +1018,7 @@
     "Spec.Distribution.Modules.Networking": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Configuration for the Networking module.",
       "properties": {
         "overrides": {
           "$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -1011,7 +1036,7 @@
             "calico",
             "cilium"
           ],
-          "description": "The type of networking to use, either ***none***, ***calico*** or ***cilium***"
+          "description": "The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or `cilium`."
         }
       },
       "required": [
@@ -1048,10 +1073,12 @@
         "additionalProperties": false,
         "properties": {
           "podCidr": {
-            "$ref": "#/$defs/Types.Cidr"
+            "$ref": "#/$defs/Types.Cidr",
+            "description": "Allows specifying a CIDR for the Pods network different from `.spec.kubernetes.podCidr`. If not set the default is to use `.spec.kubernetes.podCidr`."
           },
           "maskSize": {
-            "type": "string"
+            "type": "string",
+            "description": "The mask size to use for the Pods network on each node."
           },
           "overrides": {
             "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
@@ -1065,6 +1092,7 @@
     "Spec.Distribution.Modules.Policy": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Configuration for the Policy module.",
       "properties": {
         "overrides": {
           "$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -1076,7 +1104,7 @@
             "gatekeeper",
             "kyverno"
           ],
-          "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***"
+          "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`."
},
         "gatekeeper": {
           "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper"
@@ -1122,6 +1150,7 @@
     "Spec.Distribution.Modules.Policy.Gatekeeper": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Configuration for the Gatekeeper package.",
       "properties": {
         "additionalExcludedNamespaces": {
           "type": "array",
@@ -1137,11 +1166,11 @@
             "dryrun",
             "warn"
           ],
-          "description": "The enforcement action to use for the gatekeeper module"
+          "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations."
         },
         "installDefaultPolicies": {
           "type": "boolean",
-          "description": "If true, the default policies will be installed"
+          "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution."
         },
         "overrides": {
           "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
@@ -1155,13 +1184,14 @@
     "Spec.Distribution.Modules.Policy.Kyverno": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Configuration for the Kyverno package.",
       "properties": {
         "additionalExcludedNamespaces": {
           "type": "array",
           "items": {
             "type": "string"
           },
-          "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them."
+          "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them."
         },
         "validationFailureAction": {
           "type": "string",
           "enum": [
             "Audit",
             "Enforce"
           ],
-          "description": "The validation failure action to use for the kyverno module"
+          "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies."
         },
         "installDefaultPolicies": {
           "type": "boolean",
-          "description": "If true, the default policies will be installed"
+          "description": "Set to `false` to avoid installing the default Kyverno policies included with the distribution."
         },
         "overrides": {
           "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
@@ -1187,6 +1217,7 @@
     "Spec.Distribution.Modules.Dr": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Configuration for the Disaster Recovery module.",
       "properties": {
         "overrides": {
           "$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -1197,7 +1228,7 @@
             "none",
             "on-premises"
           ],
-          "description": "The type of the DR, must be ***none*** or ***on-premises***"
+          "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment.\n\nDefault is `none`."
}, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -1223,6 +1254,7 @@ "Spec.Distribution.Modules.Dr.Velero": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Velero package.", "properties": { "backend": { "type": "string", @@ -1297,6 +1329,7 @@ "Spec.Distribution.Modules.Auth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Auth module.", "properties": { "overrides": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" @@ -1306,7 +1339,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain for the auth module" + "description": "Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class." }, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" @@ -1385,10 +1418,11 @@ "Spec.Distribution.Modules.Auth.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Auth module.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the auth module" + "description": "Set to override the node selector used to place the pods of the Auth module." }, "tolerations": { "type": [ @@ -1398,7 +1432,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the auth module" + "description": "Set to override the tolerations that will be added to the pods of the Auth module." }, "ingresses": { "type": "object", @@ -1414,11 +1448,11 @@ "properties": { "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } }, "required": [ @@ -1437,7 +1471,7 @@ "basicAuth", "sso" ], - "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." }, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -1450,14 +1484,15 @@ "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the HTTP Basic Auth provider.", "properties": { "username": { "type": "string", - "description": "The username for the basic auth" + "description": "The username for logging in with the HTTP basic authentication." }, "password": { "type": "string", - "description": "The password for the basic auth" + "description": "The password for logging in with the HTTP basic authentication." 
}
       },
       "required": [
@@ -1471,14 +1506,15 @@
     "Spec.Distribution.Modules.Auth.Dex": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Configuration for the Dex package.",
       "properties": {
         "connectors": {
           "type": "array",
-          "description": "The connectors for dex"
+          "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/"
         },
         "additionalStaticClients": {
           "type": "array",
-          "description": "The additional static clients for dex"
+          "description": "Additional static clients definitions that will be added to the default clients included with the distribution in Dex's configuration. Example:\n\n```yaml\nadditionalStaticClients:\n  - id: my-custom-client\n    name: \"A custom additional static client\"\n    redirectURIs:\n      - \"https://myapp.tld/redirect\"\n      - \"https://alias.tld/oidc-callback\"\n    secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/"
         },
         "expiry": {
           "type": "object",
@@ -1542,11 +1578,29 @@
         }
       },
       "Types.KubeTaints": {
-        "type": "array",
-        "items": {
-          "type": "string",
-          "pattern": "^([a-zA-Z0-9\\-\\.\\/]+)=(\\w+):(NoSchedule|PreferNoSchedule|NoExecute)$"
-        }
+        "type": "object",
+        "additionalProperties": false,
+        "properties": {
+          "effect": {
+            "type": "string",
+            "enum": [
+              "NoSchedule",
+              "PreferNoSchedule",
+              "NoExecute"
+            ]
+          },
+          "key": {
+            "type": "string"
+          },
+          "value": {
+            "type": "string"
+          }
+        },
+        "required": [
+          "effect",
+          "key",
+          "value"
+        ]
      },
      "Types.KubeNodeSelector": {
        "type": [
@@ -1612,11 +1666,11 @@
        "properties": {
          "cpu": {
            "type": "string",
-            "description": "The cpu request for the prometheus pods"
+            "description": "The CPU request for the Pod, in cores. Example: `500m`."
          },
          "memory": {
            "type": "string",
-            "description": "The memory request for the opensearch pods"
+            "description": "The memory request for the Pod. Example: `500M`."
          }
        }
      },
@@ -1626,11 +1680,11 @@
        "properties": {
          "cpu": {
            "type": "string",
-            "description": "The cpu limit for the loki pods"
+            "description": "The CPU limit for the Pod. Example: `1000m`."
          },
          "memory": {
            "type": "string",
-            "description": "The memory limit for the opensearch pods"
+            "description": "The memory limit for the Pod. Example: `1G`."
          }
        }
      }
@@ -1638,11 +1692,12 @@
    },
    "Types.FuryModuleOverrides": {
      "type": "object",
+      "description": "Override the common configuration with a particular configuration for the module.",
      "additionalProperties": false,
      "properties": {
        "nodeSelector": {
          "$ref": "#/$defs/Types.KubeNodeSelector",
-          "description": "The node selector to use to place the pods for the security module"
+          "description": "Set to override the node selector used to place the pods of the module."
        },
        "tolerations": {
          "type": [
@@ -1652,7 +1707,7 @@
          "items": {
            "$ref": "#/$defs/Types.KubeToleration"
          },
-          "description": "The tolerations that will be added to the pods for the monitoring module"
+          "description": "Set to override the tolerations that will be added to the pods of the module."
        },
        "ingresses": {
          "type": "object",
@@ -1668,7 +1723,7 @@
        "properties": {
          "nodeSelector": {
            "$ref": "#/$defs/Types.KubeNodeSelector",
-            "description": "The node selector to use to place the pods for the minio module"
+            "description": "Set to override the node selector used to place the pods of the package."
}, "tolerations": { "type": [ @@ -1678,7 +1733,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cert-manager module" + "description": "Set to override the tolerations that will be added to the pods of the package." } } }, @@ -1688,15 +1743,15 @@ "properties": { "disableAuth": { "type": "boolean", - "description": "If true, the ingress will not have authentication" + "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth." }, "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } } } diff --git a/schemas/public/onpremises-kfd-v1alpha2.json b/schemas/public/onpremises-kfd-v1alpha2.json index cc808f71e..fa9ad6c34 100644 --- a/schemas/public/onpremises-kfd-v1alpha2.json +++ b/schemas/public/onpremises-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "", + "description": "A KFD Cluster deployed on top of a set of existing VMs.", "type": "object", "properties": { "apiVersion": { @@ -49,7 +49,7 @@ "properties": { "distributionVersion": { "type": "string", - "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: v1.30.1.", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "kubernetes": { @@ -708,7 +708,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", @@ -726,7 +726,7 @@ }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`)." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." } } }, @@ -792,7 +792,7 @@ }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller package." + "description": "Configurations for the Ingress nginx controller package." }, "certManager": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", @@ -837,14 +837,14 @@ }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "Set to override the node selector used to place the pods of the Ingress module" + "description": "Set to override the node selector used to place the pods of the Ingress module." 
},
         "tolerations": {
           "type": "array",
           "items": {
             "$ref": "#/$defs/Types.KubeToleration"
           },
-          "description": "Set to override the tolerations that will be added to the pods of the Ingress module"
+          "description": "Set to override the tolerations that will be added to the pods of the Ingress module."
         }
       }
     },
@@ -877,7 +877,7 @@
             "single",
             "dual"
           ],
-          "description": "The type of the nginx ingress controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type."
+          "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.\n\nDefault is `single`."
         },
         "tls": {
           "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS"
@@ -970,7 +970,7 @@
         "properties": {
           "name": {
             "type": "string",
-            "description": "Name of the clusterIssuer"
+            "description": "Name of the clusterIssuer."
           },
           "email": {
             "type": "string",
@@ -1022,7 +1022,7 @@
             "loki",
             "customOutputs"
           ],
-          "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage."
+          "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`."
},
         "opensearch": {
           "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch"
@@ -1106,7 +1106,7 @@
     },
     "Spec.Distribution.Modules.Logging.Cerebro": {
       "type": "object",
-      "description": "DEPRECATED in latest versions of KFD.",
+      "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.",
       "additionalProperties": false,
       "properties": {
         "overrides": {
@@ -1259,7 +1259,7 @@
             "prometheusAgent",
             "mimir"
           ],
-          "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage."
+          "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`."
         },
         "overrides": {
           "$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -1346,7 +1346,7 @@
         "properties": {
           "deadManSwitchWebhookUrl": {
             "type": "string",
-            "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io"
+            "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io."
           },
           "installDefaultRules": {
             "type": "boolean",
@@ -1493,7 +1493,7 @@
             "none",
             "tempo"
           ],
-          "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment."
+          "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`."
         },
         "tempo": {
           "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo"
@@ -1651,7 +1651,7 @@
             "gatekeeper",
             "kyverno"
           ],
-          "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`."
+          "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`."
}, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -1775,7 +1775,7 @@ "none", "on-premises" ], - "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment." + "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment.\n\nDefault is `none`." }, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -2041,7 +2041,7 @@ "basicAuth", "sso" ], - "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication." + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." }, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -2298,11 +2298,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu request for the loki pods" + "description": "The CPU request for the Pod, in cores. Example: `500m`." }, "memory": { "type": "string", - "description": "The memory request for the prometheus pods" + "description": "The memory request for the Pod. Example: `500M`." } } }, @@ -2312,11 +2312,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu limit for the loki pods" + "description": "The CPU limit for the Pod. Example: `1000m`." }, "memory": { "type": "string", - "description": "The memory limit for the prometheus pods" + "description": "The memory limit for the Pod. Example: `1G`." } } } diff --git a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl index 157f9ff37..2be3fe96f 100644 --- a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl +++ b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl @@ -143,7 +143,7 @@ spec: # to: 80 # # Additional AWS tags # tags: {} - # aws-auth configmap definition, see https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html for more informations + # aws-auth configmap definition, see https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html for more information. awsAuth: {} # additionalAccounts: # - "777777777777" @@ -209,7 +209,7 @@ spec: # - http01: # ingress: # class: nginx - # DNS definition, used in conjunction with externalDNS package to automate DNS management and certificates emission + # DNS definition, used in conjunction with externalDNS package to automate DNS management and certificates emission. dns: # the public DNS zone definition public: