diff --git a/banners/ekscluster.md b/banners/ekscluster.md index a66d70188..873a47e0c 100644 --- a/banners/ekscluster.md +++ b/banners/ekscluster.md @@ -2,5 +2,11 @@ This document explains the full schema for the `kind: EKSCluster` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy a Kubernetes Fury Cluster deployed through AWS's Elastic Kubernetes Service. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind EKSCluster --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. diff --git a/banners/kfddistribution.md b/banners/kfddistribution.md index a44f13847..797d2678f 100644 --- a/banners/kfddistribution.md +++ b/banners/kfddistribution.md @@ -2,5 +2,11 @@ This document explains the full schema for the `kind: KFDDistribution` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules on top of an existing Kubernetes cluster. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind KFDDistribution --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. diff --git a/banners/onpremises.md b/banners/onpremises.md index a8d8983dd..7f05c77c8 100644 --- a/banners/onpremises.md +++ b/banners/onpremises.md @@ -2,5 +2,11 @@ This document explains the full schema for the `kind: OnPremises` for the `furyctl.yaml` file used by `furyctl`. 
This configuration file will be used to deploy the Kubernetes Fury Distribution modules and cluster on premises. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/onpremises-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind OnPremises --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. diff --git a/defaults/ekscluster-kfd-v1alpha2.yaml b/defaults/ekscluster-kfd-v1alpha2.yaml index 41e37df57..5f9b60864 100644 --- a/defaults/ekscluster-kfd-v1alpha2.yaml +++ b/defaults/ekscluster-kfd-v1alpha2.yaml @@ -46,7 +46,7 @@ data: name: "" create: true # internal field, should be either the VPC ID taken from the kubernetes - # phase or the ID of the created VPC in the Ifra phase + # phase or the ID of the created VPC in the Infra phase vpcId: "" # common configuration for nginx ingress controller nginx: diff --git a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md index 883026ec9..e028f2a70 100644 --- a/docs/schemas/ekscluster-kfd-v1alpha2.md +++ b/docs/schemas/ekscluster-kfd-v1alpha2.md @@ -2,8 +2,14 @@ This document explains the full schema for the `kind: EKSCluster` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy a Kubernetes Fury Cluster deployed through AWS's Elastic Kubernetes Service. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind EKSCluster --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. 
## Properties | Property | Type | Required | @@ -15,7 +21,7 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ### Description -A Fury Cluster deployed through AWS's Elastic Kubernetes Service +A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS). ## .apiVersion @@ -49,6 +55,10 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service ## .metadata.name +### Description + +The name of the cluster. It will also be used as a prefix for all the other resources created. + ### Constraints **maximum length**: the maximum number of characters for this string is: `56` @@ -92,11 +102,15 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service | [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | | [tolerations](#specdistributioncommontolerations) | `array` | Optional | +### Description + +Common configuration for all the distribution modules. + ## .spec.distribution.common.nodeSelector ### Description -The node selector to use to place the pods for all the KFD modules +The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. ## .spec.distribution.common.provider @@ -110,21 +124,19 @@ The node selector to use to place the pods for all the KFD modules ### Description -The type of the provider, must be EKS if specified +The provider type. Don't set. FOR INTERNAL USE ONLY. ## .spec.distribution.common.registry ### Description -URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury). - -NOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too. +URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`). 
## .spec.distribution.common.relativeVendorPath ### Description -The relative path to the vendor directory, does not need to be changed +The relative path to the vendor directory, does not need to be changed. ## .spec.distribution.common.tolerations @@ -139,7 +151,13 @@ The relative path to the vendor directory, does not need to be changed ### Description -The tolerations that will be added to the pods for all the KFD modules +An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example: + +```yaml +- effect: NoSchedule + key: node.kubernetes.io/role + value: infra +``` ## .spec.distribution.common.tolerations.effect @@ -525,11 +543,15 @@ The type of the secret | [pomerium](#specdistributionmodulesauthpomerium) | `object` | Optional | | [provider](#specdistributionmodulesauthprovider) | `object` | Required | +### Description + +Configuration for the Auth module. + ## .spec.distribution.modules.auth.baseDomain ### Description -The base domain for the auth module +Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class. ## .spec.distribution.modules.auth.dex @@ -542,17 +564,32 @@ The base domain for the auth module | [expiry](#specdistributionmodulesauthdexexpiry) | `object` | Optional | | [overrides](#specdistributionmodulesauthdexoverrides) | `object` | Optional | +### Description + +Configuration for the Dex package. + ## .spec.distribution.modules.auth.dex.additionalStaticClients ### Description -The additional static clients for dex +Additional static clients defitions that will be added to the default clients included with the distribution in Dex's configuration. 
Example: + +```yaml +additionalStaticClients: + - id: my-custom-client + name: "A custom additional static client" + redirectURIs: + - "https://myapp.tld/redirect" + - "https://alias.tld/oidc-callback" + secret: supersecretpassword +``` +Reference: https://dexidp.io/docs/connectors/local/ ## .spec.distribution.modules.auth.dex.connectors ### Description -The connectors for dex +A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/ ## .spec.distribution.modules.auth.dex.expiry @@ -588,7 +625,7 @@ Dex signing key expiration time duration (default 6h). ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.auth.dex.overrides.tolerations @@ -603,7 +640,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.auth.dex.overrides.tolerations.effect @@ -650,13 +687,21 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesauthoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesauthoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the Auth module. + ## .spec.distribution.modules.auth.overrides.ingresses +### Description + +Override the definition of the Auth module ingresses. + ## .spec.distribution.modules.auth.overrides.nodeSelector ### Description -The node selector to use to place the pods for the auth module +Set to override the node selector used to place the pods of the Auth module. 
## .spec.distribution.modules.auth.overrides.tolerations @@ -671,7 +716,7 @@ The node selector to use to place the pods for the auth module ### Description -The tolerations that will be added to the pods for the auth module +Set to override the tolerations that will be added to the pods of the Auth module. ## .spec.distribution.modules.auth.overrides.tolerations.effect @@ -895,23 +940,32 @@ cat ec_private.pem | base64 | [password](#specdistributionmodulesauthproviderbasicauthpassword) | `string` | Required | | [username](#specdistributionmodulesauthproviderbasicauthusername) | `string` | Required | +### Description + +Configuration for the HTTP Basic Auth provider. + ## .spec.distribution.modules.auth.provider.basicAuth.password ### Description -The password for the basic auth +The password for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.basicAuth.username ### Description -The username for the basic auth +The username for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.type ### Description -The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** +The type of the Auth provider, options are: +- `none`: will disable authentication in the infrastructural ingresses. +- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them. +- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication. + +Default is `none`. ### Constraints @@ -969,7 +1023,7 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** ### Description -The node selector to use to place the pods for the load balancer controller module +The node selector to use to place the pods for the load balancer controller module. 
## .spec.distribution.modules.aws.clusterAutoscaler.overrides.tolerations @@ -984,7 +1038,7 @@ The node selector to use to place the pods for the load balancer controller modu ### Description -The tolerations that will be added to the pods for the cluster autoscaler module +The tolerations that will be added to the pods for the cluster autoscaler module. ## .spec.distribution.modules.aws.clusterAutoscaler.overrides.tolerations.effect @@ -1055,7 +1109,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the load balancer controller module +The node selector to use to place the pods for the load balancer controller module. ## .spec.distribution.modules.aws.ebsCsiDriver.overrides.tolerations @@ -1070,7 +1124,7 @@ The node selector to use to place the pods for the load balancer controller modu ### Description -The tolerations that will be added to the pods for the cluster autoscaler module +The tolerations that will be added to the pods for the cluster autoscaler module. ## .spec.distribution.modules.aws.ebsCsiDriver.overrides.tolerations.effect @@ -1128,7 +1182,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.aws.ebsSnapshotController.overrides.tolerations @@ -1143,7 +1197,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.aws.ebsSnapshotController.overrides.tolerations.effect @@ -1214,7 +1268,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the load balancer controller module +The node selector to use to place the pods for the load balancer controller module. 
## .spec.distribution.modules.aws.loadBalancerController.overrides.tolerations @@ -1229,7 +1283,7 @@ The node selector to use to place the pods for the load balancer controller modu ### Description -The tolerations that will be added to the pods for the cluster autoscaler module +The tolerations that will be added to the pods for the cluster autoscaler module. ## .spec.distribution.modules.aws.loadBalancerController.overrides.tolerations.effect @@ -1276,13 +1330,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesawsoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesawsoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.aws.overrides.ingresses ## .spec.distribution.modules.aws.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.aws.overrides.tolerations @@ -1297,7 +1355,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.aws.overrides.tolerations.effect @@ -1344,6 +1402,10 @@ The value of the toleration | [type](#specdistributionmodulesdrtype) | `string` | Required | | [velero](#specdistributionmodulesdrvelero) | `object` | Optional | +### Description + +Configuration for the Disaster Recovery module. 
+ ## .spec.distribution.modules.dr.overrides ### Properties @@ -1354,13 +1416,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesdroverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesdroverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.dr.overrides.ingresses ## .spec.distribution.modules.dr.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.dr.overrides.tolerations @@ -1375,7 +1441,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.dr.overrides.tolerations.effect @@ -1416,7 +1482,9 @@ The value of the toleration ### Description -The type of the DR, must be ***none*** or ***eks*** +The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups. + +Default is `none`. ### Constraints @@ -1450,13 +1518,13 @@ The type of the DR, must be ***none*** or ***eks*** ### Description -The name of the velero bucket +The name of the bucket for Velero. ## .spec.distribution.modules.dr.velero.eks.region ### Description -The region where the velero bucket is located +The region where the bucket for Velero will be located. ### Constraints @@ -1507,7 +1575,7 @@ The region where the velero bucket is located ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.dr.velero.overrides.tolerations @@ -1522,7 +1590,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.dr.velero.overrides.tolerations.effect @@ -1665,7 +1733,7 @@ Whether to install or not the default `manifests` and `full` backups schedules. ### Description -the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone +The base domain used for all the KFD ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone. ## .spec.distribution.modules.ingress.certManager @@ -1676,6 +1744,10 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati | [clusterIssuer](#specdistributionmodulesingresscertmanagerclusterissuer) | `object` | Required | | [overrides](#specdistributionmodulesingresscertmanageroverrides) | `object` | Optional | +### Description + +Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses. + ## .spec.distribution.modules.ingress.certManager.clusterIssuer ### Properties @@ -1687,29 +1759,33 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati | [solvers](#specdistributionmodulesingresscertmanagerclusterissuersolvers) | `array` | Optional | | [type](#specdistributionmodulesingresscertmanagerclusterissuertype) | `string` | Optional | +### Description + +Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt. 
+ ## .spec.distribution.modules.ingress.certManager.clusterIssuer.email ### Description -The email of the cluster issuer +The email address to use during the certificate issuing process. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.name ### Description -The name of the cluster issuer +Name of the clusterIssuer. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers ### Description -The custom solvers configurations +List of challenge solvers to use instead of the default one for the `http01` challenge. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.type ### Description -The type of the cluster issuer, must be ***dns01*** or ***http01*** +The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge. ### Constraints @@ -1733,7 +1809,7 @@ The type of the cluster issuer, must be ***dns01*** or ***http01*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations @@ -1748,7 +1824,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations.effect @@ -1795,6 +1871,10 @@ The value of the toleration | [private](#specdistributionmodulesingressdnsprivate) | `object` | Optional | | [public](#specdistributionmodulesingressdnspublic) | `object` | Optional | +### Description + +DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission. 
+ ## .spec.distribution.modules.ingress.dns.overrides ### Properties @@ -1808,7 +1888,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.dns.overrides.tolerations @@ -1823,7 +1903,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.dns.overrides.tolerations.effect @@ -1869,17 +1949,21 @@ The value of the toleration | [create](#specdistributionmodulesingressdnsprivatecreate) | `boolean` | Required | | [name](#specdistributionmodulesingressdnsprivatename) | `string` | Required | +### Description + +The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone. + ## .spec.distribution.modules.ingress.dns.private.create ### Description -If true, the private hosted zone will be created +By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead. ## .spec.distribution.modules.ingress.dns.private.name ### Description -The name of the private hosted zone +The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`. ## .spec.distribution.modules.ingress.dns.public @@ -1894,13 +1978,13 @@ The name of the private hosted zone ### Description -If true, the public hosted zone will be created +By default, a Terraform data source will be used to get the public DNS zone. Set to `true` to create the public zone instead. ## .spec.distribution.modules.ingress.dns.public.name ### Description -The name of the public hosted zone +The name of the public hosted zone. 
## .spec.distribution.modules.ingress.forecastle @@ -1923,7 +2007,7 @@ The name of the public hosted zone ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations @@ -1938,7 +2022,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations.effect @@ -1987,7 +2071,7 @@ The value of the toleration ### Description -Configurations for the nginx ingress controller module +Configurations for the Ingress nginx controller package. ## .spec.distribution.modules.ingress.nginx.overrides @@ -2002,7 +2086,7 @@ Configurations for the nginx ingress controller module ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations @@ -2017,7 +2101,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations.effect @@ -2067,7 +2151,7 @@ The value of the toleration ### Description -The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret*** +The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`. 
### Constraints @@ -2089,21 +2173,38 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or ** | [cert](#specdistributionmodulesingressnginxtlssecretcert) | `string` | Required | | [key](#specdistributionmodulesingressnginxtlssecretkey) | `string` | Required | +### Description + +Kubernetes TLS secret for the ingresses TLS certificate. + ## .spec.distribution.modules.ingress.nginx.tls.secret.ca +### Description + +The Certificate Authority certificate file's content. You can use the `"{file://}"` notation to get the content from a file. + ## .spec.distribution.modules.ingress.nginx.tls.secret.cert ### Description -The certificate file content or you can use the file notation to get the content from a file +The certificate file's content. You can use the `"{file://}"` notation to get the content from a file. ## .spec.distribution.modules.ingress.nginx.tls.secret.key +### Description + +The signing key file's content. You can use the `"{file://}"` notation to get the content from a file. + ## .spec.distribution.modules.ingress.nginx.type ### Description -The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual*** +The type of the Ingress nginx controller, options are: +- `none`: no ingress controller will be installed and no infrastructural ingresses will be created. +- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created. +- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type. + +Default is `single`. 
### Constraints @@ -2125,6 +2226,10 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** | [nodeSelector](#specdistributionmodulesingressoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesingressoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the Ingress module. + ## .spec.distribution.modules.ingress.overrides.ingresses ### Properties @@ -2147,25 +2252,25 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** ### Description -If true, the ingress will not have authentication +If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth. ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.host ### Description -The host of the ingress +Use this host for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.ingressClass ### Description -The ingress class of the ingress +Use this ingress class for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.nodeSelector ### Description -The node selector to use to place the pods for the ingress module +Set to override the node selector used to place the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations @@ -2180,7 +2285,7 @@ The node selector to use to place the pods for the ingress module ### Description -The tolerations that will be added to the pods for the ingress module +Set to override the tolerations that will be added to the pods of the Ingress module. 
## .spec.distribution.modules.ingress.overrides.tolerations.effect @@ -2232,6 +2337,10 @@ The value of the toleration | [overrides](#specdistributionmodulesloggingoverrides) | `object` | Optional | | [type](#specdistributionmodulesloggingtype) | `string` | Required | +### Description + +Configuration for the Logging module. + ## .spec.distribution.modules.logging.cerebro ### Properties @@ -2240,6 +2349,10 @@ The value of the toleration |:-------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingcerebrooverrides) | `object` | Optional | +### Description + +DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. + ## .spec.distribution.modules.logging.cerebro.overrides ### Properties @@ -2253,7 +2366,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.cerebro.overrides.tolerations @@ -2268,7 +2381,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.cerebro.overrides.tolerations.effect @@ -2322,55 +2435,55 @@ The value of the toleration ### Description -when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows. +When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows. ## .spec.distribution.modules.logging.customOutputs.audit ### Description -This value defines where the output from Flow will be sent. 
Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.errors ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.events ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.infra ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `infra` Flow will be sent. 
This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.ingressNginx ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.kubernetes ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdCommon ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdEtcd ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.loki @@ -2383,8 +2496,16 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional | | [tsdbStartDate](#specdistributionmoduleslogginglokitsdbstartdate) | `string` | Required | +### Description + +Configuration for the Loki package. + ## .spec.distribution.modules.logging.loki.backend +### Description + +The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -2406,35 +2527,39 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [insecure](#specdistributionmoduleslogginglokiexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmoduleslogginglokiexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Loki's external storage backend. 
+ ## .spec.distribution.modules.logging.loki.externalEndpoint.accessKeyId ### Description -The access key id of the loki external endpoint +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.externalEndpoint.bucketName ### Description -The bucket name of the loki external endpoint +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.endpoint ### Description -The endpoint of the loki external endpoint +External S3-compatible endpoint for Loki's storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.insecure ### Description -If true, the loki external endpoint will be insecure +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.logging.loki.externalEndpoint.secretAccessKey ### Description -The secret access key of the loki external endpoint +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.resources @@ -2458,13 +2583,13 @@ The secret access key of the loki external endpoint ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.loki.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.loki.resources.requests @@ -2479,13 +2604,13 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.loki.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.loki.tsdbStartDate @@ -2507,6 +2632,10 @@ Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). 
Example: `2024- | [rootUser](#specdistributionmodulesloggingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulesloggingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Logging's MinIO deployment. + ## .spec.distribution.modules.logging.minio.overrides ### Properties @@ -2520,7 +2649,7 @@ Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024- ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations @@ -2535,7 +2664,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations.effect @@ -2585,19 +2714,19 @@ The value of the toleration ### Description -The password of the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.logging.minio.rootUser.username ### Description -The username of the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.logging.minio.storageSize ### Description -The PVC size for each minio disk, 6 disks total +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.logging.opensearch @@ -2623,7 +2752,7 @@ The PVC size for each minio disk, 6 disks total ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.logging.opensearch.overrides.tolerations @@ -2638,7 +2767,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations.effect @@ -2697,13 +2826,13 @@ The value of the toleration ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.opensearch.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.opensearch.resources.requests @@ -2718,25 +2847,25 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.opensearch.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.opensearch.storageSize ### Description -The storage size for the opensearch pods +The storage size for the OpenSearch volumes. ## .spec.distribution.modules.logging.opensearch.type ### Description -The type of the opensearch, must be ***single*** or ***triple*** +The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment. ### Constraints @@ -2755,6 +2884,10 @@ The type of the opensearch, must be ***single*** or ***triple*** |:--------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingoperatoroverrides) | `object` | Optional | +### Description + +Configuration for the Logging Operator. 
+ ## .spec.distribution.modules.logging.operator.overrides ### Properties @@ -2768,7 +2901,7 @@ The type of the opensearch, must be ***single*** or ***triple*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.operator.overrides.tolerations @@ -2783,7 +2916,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.operator.overrides.tolerations.effect @@ -2830,13 +2963,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesloggingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesloggingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.logging.overrides.ingresses ## .spec.distribution.modules.logging.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.logging.overrides.tolerations @@ -2851,7 +2988,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.logging.overrides.tolerations.effect @@ -2892,7 +3029,13 @@ The value of the toleration ### Description -selects the logging stack. Choosing none will disable the centralized logging. 
Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. +Selects the logging stack. Options are: +- `none`: will disable the centralized logging. +- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. +- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage. +- `customOutputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. + +Default is `opensearch`. ### Constraints @@ -2925,7 +3068,7 @@ selects the logging stack. Choosing none will disable the centralized logging. C ### Description -configuration for the Monitoring module components +Configuration for the Monitoring module. ## .spec.distribution.modules.monitoring.alertmanager @@ -2941,19 +3084,19 @@ configuration for the Monitoring module components ### Description -The webhook url to send deadman switch monitoring, for example to use with healthchecks.io +The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io. ## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules ### Description -If true, the default rules will be installed +Set to false to avoid installing the Prometheus rules (alerts) included with the distribution. ## .spec.distribution.modules.monitoring.alertmanager.slackWebhookUrl ### Description -The slack webhook url to send alerts +The Slack webhook URL where to send the infrastructural and workload alerts to. 
## .spec.distribution.modules.monitoring.blackboxExporter @@ -2976,7 +3119,7 @@ The slack webhook url to send alerts ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations @@ -2991,7 +3134,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations.effect @@ -3059,7 +3202,7 @@ Notice that by default anonymous access is enabled. ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations @@ -3074,7 +3217,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations.effect @@ -3144,7 +3287,7 @@ More details in [Grafana's documentation](https://grafana.com/docs/grafana/lates ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations @@ -3159,7 +3302,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations.effect @@ -3207,11 +3350,15 @@ The value of the toleration | [overrides](#specdistributionmodulesmonitoringmimiroverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulesmonitoringmimirretentiontime) | `string` | Optional | +### Description + +Configuration for the Mimir package. + ## .spec.distribution.modules.monitoring.mimir.backend ### Description -The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. ### Constraints @@ -3234,35 +3381,39 @@ The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulesmonitoringmimirexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulesmonitoringmimirexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Mimir's external storage backend. + ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.accessKeyId ### Description -The access key id of the external mimir backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.bucketName ### Description -The bucket name of the external mimir backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.endpoint ### Description -The endpoint of the external mimir backend +External S3-compatible endpoint for Mimir's storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure ### Description -If true, the external mimir backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. 
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.secretAccessKey ### Description -The secret access key of the external mimir backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.overrides @@ -3277,7 +3428,7 @@ The secret access key of the external mimir backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.mimir.overrides.tolerations @@ -3292,7 +3443,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.mimir.overrides.tolerations.effect @@ -3333,7 +3484,7 @@ The value of the toleration ### Description -The retention time for the mimir pods +The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days. ## .spec.distribution.modules.monitoring.minio @@ -3345,6 +3496,10 @@ The retention time for the mimir pods | [rootUser](#specdistributionmodulesmonitoringminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulesmonitoringminiostoragesize) | `string` | Optional | +### Description + +Configuration for Monitoring's MinIO deployment. + ## .spec.distribution.modules.monitoring.minio.overrides ### Properties @@ -3358,7 +3513,7 @@ The retention time for the mimir pods ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.monitoring.minio.overrides.tolerations @@ -3373,7 +3528,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.minio.overrides.tolerations.effect @@ -3423,19 +3578,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.monitoring.overrides @@ -3447,13 +3602,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulesmonitoringoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesmonitoringoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.monitoring.overrides.ingresses ## .spec.distribution.modules.monitoring.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations @@ -3468,7 +3627,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. 
## .spec.distribution.modules.monitoring.overrides.tolerations.effect @@ -3547,13 +3706,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests @@ -3568,31 +3727,31 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.prometheus.retentionSize ### Description -The retention size for the k8s Prometheus instance. +The retention size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.retentionTime ### Description -The retention time for the k8s Prometheus instance. +The retention time for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.storageSize ### Description -The storage size for the k8s Prometheus instance. +The storage size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheusAgent @@ -3633,13 +3792,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. 
## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests @@ -3654,24 +3813,26 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.type ### Description -The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***. +The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`. - `none`: will disable the whole monitoring stack. -- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. +- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. -- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. +- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. + +Default is `prometheus`. 
### Constraints @@ -3705,7 +3866,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations @@ -3720,7 +3881,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations.effect @@ -3766,20 +3927,31 @@ The value of the toleration | [overrides](#specdistributionmodulesnetworkingoverrides) | `object` | Optional | | [tigeraOperator](#specdistributionmodulesnetworkingtigeraoperator) | `object` | Optional | +### Description + +Configuration for the Networking module. + ## .spec.distribution.modules.networking.overrides ### Properties | Property | Type | Required | |:------------------------------------------------------------------------|:---------|:---------| +| [ingresses](#specdistributionmodulesnetworkingoverridesingresses) | `object` | Optional | | [nodeSelector](#specdistributionmodulesnetworkingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesnetworkingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + +## .spec.distribution.modules.networking.overrides.ingresses + ## .spec.distribution.modules.networking.overrides.nodeSelector ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the module. 
## .spec.distribution.modules.networking.overrides.tolerations @@ -3794,7 +3966,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.networking.overrides.tolerations.effect @@ -3852,7 +4024,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations @@ -3867,7 +4039,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations.effect @@ -3915,6 +4087,10 @@ The value of the toleration | [overrides](#specdistributionmodulespolicyoverrides) | `object` | Optional | | [type](#specdistributionmodulespolicytype) | `string` | Required | +### Description + +Configuration for the Policy module. + ## .spec.distribution.modules.policy.gatekeeper ### Properties @@ -3926,6 +4102,10 @@ The value of the toleration | [installDefaultPolicies](#specdistributionmodulespolicygatekeeperinstalldefaultpolicies) | `boolean` | Required | | [overrides](#specdistributionmodulespolicygatekeeperoverrides) | `object` | Optional | +### Description + +Configuration for the Gatekeeper package. + ## .spec.distribution.modules.policy.gatekeeper.additionalExcludedNamespaces ### Description @@ -3936,7 +4116,7 @@ This parameter adds namespaces to Gatekeeper's exemption list, so it will not en ### Description -The enforcement action to use for the gatekeeper module +The default enforcement action to use for the included constraints. 
`deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations. ### Constraints @@ -3952,7 +4132,7 @@ The enforcement action to use for the gatekeeper module ### Description -If true, the default policies will be installed +Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution. ## .spec.distribution.modules.policy.gatekeeper.overrides @@ -3967,7 +4147,7 @@ If true, the default policies will be installed ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations @@ -3982,7 +4162,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations.effect @@ -4030,17 +4210,21 @@ The value of the toleration | [overrides](#specdistributionmodulespolicykyvernooverrides) | `object` | Optional | | [validationFailureAction](#specdistributionmodulespolicykyvernovalidationfailureaction) | `string` | Required | +### Description + +Configuration for the Kyverno package. + ## .spec.distribution.modules.policy.kyverno.additionalExcludedNamespaces ### Description -This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them. +This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them. 
## .spec.distribution.modules.policy.kyverno.installDefaultPolicies ### Description -If true, the default policies will be installed +Set to `false` to avoid installing the default Kyverno policies included with the distribution. ## .spec.distribution.modules.policy.kyverno.overrides @@ -4055,7 +4239,7 @@ If true, the default policies will be installed ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.policy.kyverno.overrides.tolerations @@ -4070,7 +4254,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.policy.kyverno.overrides.tolerations.effect @@ -4111,7 +4295,7 @@ The value of the toleration ### Description -The validation failure action to use for the kyverno module +The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies. ### Constraints @@ -4132,13 +4316,17 @@ The validation failure action to use for the kyverno module | [nodeSelector](#specdistributionmodulespolicyoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulespolicyoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.policy.overrides.ingresses ## .spec.distribution.modules.policy.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. 
## .spec.distribution.modules.policy.overrides.tolerations @@ -4153,7 +4341,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations.effect @@ -4194,7 +4382,9 @@ The value of the toleration ### Description -The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno*** +The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. + +Default is `none`. ### Constraints @@ -4217,6 +4407,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [tempo](#specdistributionmodulestracingtempo) | `object` | Optional | | [type](#specdistributionmodulestracingtype) | `string` | Required | +### Description + +Configuration for the Tracing module. + ## .spec.distribution.modules.tracing.minio ### Properties @@ -4227,6 +4421,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [rootUser](#specdistributionmodulestracingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulestracingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Tracing's MinIO deployment. + ## .spec.distribution.modules.tracing.minio.overrides ### Properties @@ -4240,7 +4438,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.tracing.minio.overrides.tolerations @@ -4255,7 +4453,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations.effect @@ -4305,19 +4503,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.tracing.overrides @@ -4329,13 +4527,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulestracingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulestracingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.tracing.overrides.ingresses ## .spec.distribution.modules.tracing.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations @@ -4350,7 +4552,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. 
## .spec.distribution.modules.tracing.overrides.tolerations.effect @@ -4398,11 +4600,15 @@ The value of the toleration | [overrides](#specdistributionmodulestracingtempooverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulestracingtemporetentiontime) | `string` | Optional | +### Description + +Configuration for the Tempo package. + ## .spec.distribution.modules.tracing.tempo.backend ### Description -The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. ### Constraints @@ -4425,35 +4631,39 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulestracingtempoexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulestracingtempoexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Tempo's external storage backend. + ## .spec.distribution.modules.tracing.tempo.externalEndpoint.accessKeyId ### Description -The access key id of the external tempo backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.bucketName ### Description -The bucket name of the external tempo backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.endpoint ### Description -The endpoint of the external tempo backend +External S3-compatible endpoint for Tempo's storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure ### Description -If true, the external tempo backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. 
## .spec.distribution.modules.tracing.tempo.externalEndpoint.secretAccessKey ### Description -The secret access key of the external tempo backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.overrides @@ -4468,7 +4678,7 @@ The secret access key of the external tempo backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.tracing.tempo.overrides.tolerations @@ -4483,7 +4693,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.tempo.overrides.tolerations.effect @@ -4524,13 +4734,15 @@ The value of the toleration ### Description -The retention time for the tempo pods +The retention time for the traces stored in Tempo. ## .spec.distribution.modules.tracing.type ### Description -The type of tracing to use, either ***none*** or ***tempo*** +The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment. + +Default is `tempo`. ### Constraints @@ -4543,6 +4755,10 @@ The type of tracing to use, either ***none*** or ***tempo*** ## .spec.distributionVersion +### Description + +Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`. 
+
### Constraints

**minimum length**: the minimum number of characters for this string is: `1`

@@ -4566,7 +4782,7 @@ The type of tracing to use, either ***none*** or ***tempo***

### Description

-This key defines the VPC that will be created in AWS
+Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead.

## .spec.infrastructure.vpc.network

@@ -4581,7 +4797,7 @@ This key defines the VPC that will be created in AWS

### Description

-This is the CIDR of the VPC that will be created
+The network CIDR for the VPC that will be created

### Constraints

@@ -4602,11 +4818,15 @@ This is the CIDR of the VPC that will be created

| [private](#specinfrastructurevpcnetworksubnetscidrsprivate) | `array` | Required |
| [public](#specinfrastructurevpcnetworksubnetscidrspublic) | `array` | Required |

+### Description
+
+Network CIDRs configuration for private and public subnets.
+
## .spec.infrastructure.vpc.network.subnetsCidrs.private

### Description

-These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created
+Network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created

### Constraints

@@ -4622,7 +4842,7 @@ These are the CIRDs for the private subnets, where the nodes, the pods, and the 

### Description

-These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created
+Network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created

### Constraints

@@ -4654,31 +4874,31 @@ These are the CIDRs for the public subnets, where the public load balancers and 

### Description

-This section defines the creation of VPN bastions
+Configuration for the VPN server instances.
## .spec.infrastructure.vpn.bucketNamePrefix

### Description

-This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states
+This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users).

## .spec.infrastructure.vpn.dhParamsBits

### Description

-The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file
+The `dhParamsBits` size used for the creation of the .pem file that will be used in the dh openvpn server.conf file.

## .spec.infrastructure.vpn.diskSize

### Description

-The size of the disk in GB
+The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB.

## .spec.infrastructure.vpn.iamUserNameOverride

### Description

-Overrides the default IAM user name for the VPN
+Overrides IAM user name for the VPN. Default is to use the cluster name.

### Constraints

@@ -4694,25 +4914,25 @@ Overrides the default IAM user name for the VPN

### Description

-The size of the AWS EC2 instance
+The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3.micro`.

## .spec.infrastructure.vpn.instances

### Description

-The number of instances to create, 0 to skip the creation
+The number of VPN server instances to create, `0` to skip the creation.

## .spec.infrastructure.vpn.operatorName

### Description

-The username of the account to create in the bastion's operating system
+The username of the account to create in the bastion's operating system.

## .spec.infrastructure.vpn.port

### Description

-The port used by the OpenVPN server
+The port where each OpenVPN server will listen for connections.
## .spec.infrastructure.vpn.ssh

@@ -4728,7 +4948,7 @@ The port used by the OpenVPN server

### Description

-The CIDR enabled in the security group that can access the bastions in SSH
+The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source.

### Constraints

@@ -4744,7 +4964,7 @@ The CIDR enabled in the security group that can access the bastions in SSH

### Description

-The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user
+List of GitHub usernames from which to get the SSH public keys that will be added as authorized keys of the `operatorName` user.

### Constraints

@@ -4754,13 +4974,13 @@ The github user name list that will be used to get the ssh public key that will 

### Description

-This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented
+**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system.

## .spec.infrastructure.vpn.vpcId

### Description

-The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted
+The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted.

### Constraints

@@ -4776,7 +4996,7 @@ The VPC ID where the VPN servers will be created, required only if .spec.infrast

### Description

-The CIDR that will be used to assign IP addresses to the VPN clients when connected
+The network CIDR that will be used to assign IP addresses to the VPN clients when connected.
### Constraints

@@ -4808,6 +5028,10 @@ The CIDR that will be used to assign IP addresses to the VPN clients when connec

| [vpcId](#speckubernetesvpcid) | `string` | Optional |
| [workersIAMRoleNamePrefixOverride](#speckubernetesworkersiamrolenameprefixoverride) | `string` | Optional |

+### Description
+
+Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.
+
## .spec.kubernetes.apiServer

### Properties

@@ -4823,13 +5047,13 @@ The CIDR that will be used to assign IP addresses to the VPN clients when connec

### Description

-This value defines if the API server will be accessible only from the private subnets
+This value defines if the Kubernetes API server will be accessible from the private subnets. Default is `true`.

## .spec.kubernetes.apiServer.privateAccessCidrs

### Description

-This value defines the CIDRs that will be allowed to access the API server from the private subnets
+The network CIDRs from the private subnets that will be allowed to access the Kubernetes API server.

### Constraints

@@ -4845,13 +5069,13 @@ This value defines the CIDRs that will be allowed to access the API server from

### Description

-This value defines if the API server will be accessible from the public subnets
+This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`.

## .spec.kubernetes.apiServer.publicAccessCidrs

### Description

-This value defines the CIDRs that will be allowed to access the API server from the public subnets
+The network CIDRs from the public subnets that will be allowed to access the Kubernetes API server.

### Constraints

@@ -4873,11 +5097,17 @@ This value defines the CIDRs that will be allowed to access the API server from

| [roles](#speckubernetesawsauthroles) | `array` | Optional |
| [users](#speckubernetesawsauthusers) | `array` | Optional |

+### Description
+
+Optional additional security configuration for EKS IAM via the `aws-auth` configmap.
+ +Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html + ## .spec.kubernetes.awsAuth.additionalAccounts ### Description -This optional array defines additional AWS accounts that will be added to the aws-auth configmap +This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap. ## .spec.kubernetes.awsAuth.roles @@ -4891,7 +5121,7 @@ This optional array defines additional AWS accounts that will be added to the aw ### Description -This optional array defines additional IAM roles that will be added to the aws-auth configmap +This optional array defines additional IAM roles that will be added to the `aws-auth` configmap. ## .spec.kubernetes.awsAuth.roles.groups @@ -4921,7 +5151,7 @@ This optional array defines additional IAM roles that will be added to the aws-a ### Description -This optional array defines additional IAM users that will be added to the aws-auth configmap +This optional array defines additional IAM users that will be added to the `aws-auth` configmap. ## .spec.kubernetes.awsAuth.users.groups @@ -4943,7 +5173,7 @@ This optional array defines additional IAM users that will be added to the aws-a ### Description -Overrides the default IAM role name prefix for the EKS cluster +Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name. ### Constraints @@ -4959,7 +5189,37 @@ Overrides the default IAM role name prefix for the EKS cluster ### Description -Optional Kubernetes Cluster log retention in days. Defaults to 90 days. +Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. Default is `90` days. 
+
+### Constraints
+
+**enum**: the value of this property must be equal to one of the following integer values:
+
+| Value |
+|:----|
+|0 |
+|1 |
+|3 |
+|5 |
+|7 |
+|14 |
+|30 |
+|60 |
+|90 |
+|120 |
+|150 |
+|180 |
+|365 |
+|400 |
+|545 |
+|731 |
+|1096|
+|1827|
+|2192|
+|2557|
+|2922|
+|3288|
+|3653|

## .spec.kubernetes.logsTypes

@@ -4983,7 +5243,7 @@ Optional list of Kubernetes Cluster log types to enable. Defaults to all types.

### Description

-This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user
+The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_rsa.pub` file.

## .spec.kubernetes.nodePoolGlobalAmiType

@@ -5019,6 +5279,10 @@ Global default AMI type used for EKS worker nodes. This will apply to all node p

| [taints](#speckubernetesnodepoolstaints) | `array` | Optional |
| [type](#speckubernetesnodepoolstype) | `string` | Required |

+### Description
+
+Array with all the node pool definitions that will join the cluster. Each item is an object.
+
## .spec.kubernetes.nodePools.additionalFirewallRules

### Properties

@@ -5029,6 +5293,10 @@ Global default AMI type used for EKS worker nodes. This will apply to all node p

| [self](#speckubernetesnodepoolsadditionalfirewallrulesself) | `array` | Optional |
| [sourceSecurityGroupId](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupid) | `array` | Optional |

+### Description
+
+Optional additional firewall rules that will be attached to the nodes.
+
## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks

### Properties

@@ -5044,10 +5312,12 @@ Global default AMI type used for EKS worker nodes. This will apply to all node p

### Description

-The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored.
+The CIDR blocks objects definition for the Firewall rule. Even though it is a list, only one item is currently supported.
See https://github.com/sighupio/fury-eks-installer/issues/46 for more details. ### Constraints +**maximum number of items**: the maximum number of items for this array is: `1` + **minimum number of items**: the minimum number of items for this array is: `1` ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.cidrBlocks @@ -5075,6 +5345,10 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b | [from](#speckubernetesnodepoolsadditionalfirewallrulescidrblocksportsfrom) | `integer` | Required | | [to](#speckubernetesnodepoolsadditionalfirewallrulescidrblocksportsto) | `integer` | Required | +### Description + +Port range for the Firewall Rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.ports.from ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.ports.to @@ -5093,8 +5367,16 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.tags +### Description + +Additional AWS tags for the Firewall rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.type +### Description + +The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -5125,7 +5407,7 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b ### Description -The name of the FW rule +The name of the Firewall rule. ## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports @@ -5136,6 +5418,10 @@ The name of the FW rule | [from](#speckubernetesnodepoolsadditionalfirewallrulesselfportsfrom) | `integer` | Required | | [to](#speckubernetesnodepoolsadditionalfirewallrulesselfportsto) | `integer` | Required | +### Description + +Port range for the Firewall Rule. 
+ ## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports.from ## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports.to @@ -5144,7 +5430,7 @@ The name of the FW rule ### Description -The protocol of the FW rule +The protocol of the Firewall rule. ### Constraints @@ -5160,19 +5446,19 @@ The protocol of the FW rule ### Description -If true, the source will be the security group itself +If `true`, the source will be the security group itself. ## .spec.kubernetes.nodePools.additionalFirewallRules.self.tags ### Description -The tags of the FW rule +Additional AWS tags for the Firewall rule. ## .spec.kubernetes.nodePools.additionalFirewallRules.self.type ### Description -The type of the FW rule can be ingress or egress +The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic. ### Constraints @@ -5204,7 +5490,7 @@ The type of the FW rule can be ingress or egress ### Description -The name of the FW rule +The name for the additional Firewall rule Security Group. ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports @@ -5215,6 +5501,10 @@ The name of the FW rule | [from](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupidportsfrom) | `integer` | Required | | [to](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupidportsto) | `integer` | Required | +### Description + +Port range for the Firewall Rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports.from ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports.to @@ -5223,7 +5513,7 @@ The name of the FW rule ### Description -The protocol of the FW rule +The protocol of the Firewall rule. ### Constraints @@ -5239,19 +5529,19 @@ The protocol of the FW rule ### Description -The source security group ID +The source security group ID. 
## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.tags ### Description -The tags of the FW rule +Additional AWS tags for the Firewall rule. ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.type ### Description -The type of the FW rule can be ingress or egress +The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic. ### Constraints @@ -5309,7 +5599,7 @@ The AMI type defines the AMI to use for `eks-managed` and `self-managed` type of ### Description -This optional array defines additional target groups to attach to the instances in the node pool +This optional array defines additional target groups to attach to the instances in the node pool. ### Constraints @@ -5325,7 +5615,7 @@ This optional array defines additional target groups to attach to the instances ### Description -The container runtime to use for the nodes +The container runtime to use in the nodes of the node pool. Default is `containerd`. ### Constraints @@ -5348,28 +5638,42 @@ The container runtime to use for the nodes | [volumeSize](#speckubernetesnodepoolsinstancevolumesize) | `integer` | Optional | | [volumeType](#speckubernetesnodepoolsinstancevolumetype) | `string` | Optional | +### Description + +Configuration for the instances that will be used in the node pool. + ## .spec.kubernetes.nodePools.instance.maxPods +### Description + +Set the maximum pods per node to a custom value. If not set will use EKS default value that depends on the instance type. + +Ref: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt + ## .spec.kubernetes.nodePools.instance.spot ### Description -If true, the nodes will be created as spot instances +If `true`, the nodes will be created as spot instances. Default is `false`. ## .spec.kubernetes.nodePools.instance.type ### Description -The instance type to use for the nodes +The instance type to use for the nodes. 
## .spec.kubernetes.nodePools.instance.volumeSize ### Description -The size of the disk in GB +The size of the disk in GB. ## .spec.kubernetes.nodePools.instance.volumeType +### Description + +Volume type for the instance disk. Default is `gp2`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -5385,7 +5689,7 @@ The size of the disk in GB ### Description -Kubernetes labels that will be added to the nodes +Kubernetes labels that will be added to the nodes. ## .spec.kubernetes.nodePools.name @@ -5406,19 +5710,19 @@ The name of the node pool. ### Description -The maximum number of nodes in the node pool +The maximum number of nodes in the node pool. ## .spec.kubernetes.nodePools.size.min ### Description -The minimum number of nodes in the node pool +The minimum number of nodes in the node pool. ## .spec.kubernetes.nodePools.subnetIds ### Description -This value defines the subnet IDs where the nodes will be created +Optional list of subnet IDs where to create the nodes. ### Constraints @@ -5434,7 +5738,7 @@ This value defines the subnet IDs where the nodes will be created ### Description -AWS tags that will be added to the ASG and EC2 instances +AWS tags that will be added to the ASG and EC2 instances. ## .spec.kubernetes.nodePools.taints @@ -5467,7 +5771,7 @@ The type of Node Pool, can be `self-managed` for using customization like custom ### Description -Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim. +Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim. 
### Constraints

@@ -5483,7 +5787,7 @@ Either `launch_configurations`, `launch_templates` or `both`. For new clusters u

### Description

-This value defines the CIDR that will be used to assign IP addresses to the services
+This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services.

### Constraints

@@ -5499,7 +5803,7 @@ This value defines the CIDR that will be used to assign IP addresses to the serv

### Description

-This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted
+Required only if `.spec.infrastructure.vpc` is omitted. This value defines the IDs of the subnets where the EKS cluster will be created.

### Constraints

@@ -5515,7 +5819,7 @@ This value defines the subnet IDs where the EKS cluster will be created, require

### Description

-This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted
+Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created.

### Constraints

@@ -5531,7 +5835,7 @@ This value defines the VPC ID where the EKS cluster will be created, required on

### Description

-Overrides the default IAM role name prefix for the EKS workers
+Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name.

### Constraints

@@ -5676,6 +5980,10 @@ The name of the kustomize plugin

## .spec.region

+### Description
+
+Defines in which AWS region the cluster and all the related resources will be created.
+ ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -5726,6 +6034,10 @@ This map defines which will be the common tags that will be added to all the res |:----------------------------------------------|:---------|:---------| | [terraform](#spectoolsconfigurationterraform) | `object` | Required | +### Description + +Configuration for tools used by furyctl, like Terraform. + ## .spec.toolsConfiguration.terraform ### Properties @@ -5742,6 +6054,10 @@ This map defines which will be the common tags that will be added to all the res |:----------------------------------------------|:---------|:---------| | [s3](#spectoolsconfigurationterraformstates3) | `object` | Required | +### Description + +Configuration for storing the Terraform state of the cluster. + ## .spec.toolsConfiguration.terraform.state.s3 ### Properties @@ -5753,17 +6069,21 @@ This map defines which will be the common tags that will be added to all the res | [region](#spectoolsconfigurationterraformstates3region) | `string` | Required | | [skipRegionValidation](#spectoolsconfigurationterraformstates3skipregionvalidation) | `boolean` | Optional | +### Description + +Configuration for the S3 bucket used to store the Terraform state. + ## .spec.toolsConfiguration.terraform.state.s3.bucketName ### Description -This value defines which bucket will be used to store all the states +This value defines which bucket will be used to store all the states. ## .spec.toolsConfiguration.terraform.state.s3.keyPrefix ### Description -This value defines which folder will be used to store all the states inside the bucket +This value defines which folder will be used to store all the states inside the bucket. ### Constraints @@ -5781,7 +6101,7 @@ This value defines which folder will be used to store all the states inside the ### Description -This value defines in which region the bucket is located +This value defines in which region the bucket is located. 
### Constraints @@ -5823,5 +6143,5 @@ This value defines in which region the bucket is located ### Description -This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region +This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region. diff --git a/docs/schemas/kfddistribution-kfd-v1alpha2.md b/docs/schemas/kfddistribution-kfd-v1alpha2.md index b663177e0..095a35e79 100644 --- a/docs/schemas/kfddistribution-kfd-v1alpha2.md +++ b/docs/schemas/kfddistribution-kfd-v1alpha2.md @@ -2,8 +2,14 @@ This document explains the full schema for the `kind: KFDDistribution` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules on top of an existing Kubernetes cluster. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind KFDDistribution --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. ## Properties | Property | Type | Required | @@ -13,6 +19,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio | [metadata](#metadata) | `object` | Required | | [spec](#spec) | `object` | Required | +### Description + +KFD modules deployed on top of an existing Kubernetes cluster. + ## .apiVersion ### Constraints @@ -45,6 +55,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ## .metadata.name +### Description + +The name of the cluster. It will also be used as a prefix for all the other resources created. 
+ ### Constraints **maximum length**: the maximum number of characters for this string is: `56` @@ -84,11 +98,15 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio | [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | | [tolerations](#specdistributioncommontolerations) | `array` | Optional | +### Description + +Common configuration for all the distribution modules. + ## .spec.distribution.common.nodeSelector ### Description -The node selector to use to place the pods for all the KFD modules +The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. ## .spec.distribution.common.provider @@ -102,13 +120,13 @@ The node selector to use to place the pods for all the KFD modules ### Description -The type of the provider +The provider type. Don't set. FOR INTERNAL USE ONLY. ## .spec.distribution.common.registry ### Description -URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury). +URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`). NOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too. @@ -116,7 +134,7 @@ NOTE: If plugins are pulling from the default registry, the registry will be rep ### Description -The relative path to the vendor directory, does not need to be changed +The relative path to the vendor directory, does not need to be changed. ## .spec.distribution.common.tolerations @@ -131,7 +149,13 @@ The relative path to the vendor directory, does not need to be changed ### Description -The tolerations that will be added to the pods for all the KFD modules +An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. 
Example: + +```yaml +- effect: NoSchedule + key: node.kubernetes.io/role + value: infra +``` ## .spec.distribution.common.tolerations.effect @@ -493,7 +517,7 @@ The type of the secret ### Description -The kubeconfig file path +The path to the kubeconfig file. ## .spec.distribution.modules @@ -522,11 +546,15 @@ The kubeconfig file path | [pomerium](#specdistributionmodulesauthpomerium) | `object` | Optional | | [provider](#specdistributionmodulesauthprovider) | `object` | Required | +### Description + +Configuration for the Auth module. + ## .spec.distribution.modules.auth.baseDomain ### Description -The base domain for the auth module +Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class. ## .spec.distribution.modules.auth.dex @@ -539,17 +567,32 @@ The base domain for the auth module | [expiry](#specdistributionmodulesauthdexexpiry) | `object` | Optional | | [overrides](#specdistributionmodulesauthdexoverrides) | `object` | Optional | +### Description + +Configuration for the Dex package. + ## .spec.distribution.modules.auth.dex.additionalStaticClients ### Description -The additional static clients for dex +Additional static clients defitions that will be added to the default clients included with the distribution in Dex's configuration. Example: + +```yaml +additionalStaticClients: + - id: my-custom-client + name: "A custom additional static client" + redirectURIs: + - "https://myapp.tld/redirect" + - "https://alias.tld/oidc-callback" + secret: supersecretpassword +``` +Reference: https://dexidp.io/docs/connectors/local/ ## .spec.distribution.modules.auth.dex.connectors ### Description -The connectors for dex +A list with each item defining a Dex connector. 
Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/ ## .spec.distribution.modules.auth.dex.expiry @@ -585,7 +628,7 @@ Dex signing key expiration time duration (default 6h). ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.auth.dex.overrides.tolerations @@ -600,7 +643,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.auth.dex.overrides.tolerations.effect @@ -647,13 +690,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesauthoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesauthoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the Auth module. + ## .spec.distribution.modules.auth.overrides.ingresses ## .spec.distribution.modules.auth.overrides.nodeSelector ### Description -The node selector to use to place the pods for the auth module +Set to override the node selector used to place the pods of the Auth module. ## .spec.distribution.modules.auth.overrides.tolerations @@ -668,7 +715,7 @@ The node selector to use to place the pods for the auth module ### Description -The tolerations that will be added to the pods for the auth module +Set to override the tolerations that will be added to the pods of the Auth module. 
## .spec.distribution.modules.auth.overrides.tolerations.effect @@ -892,23 +939,32 @@ cat ec_private.pem | base64 | [password](#specdistributionmodulesauthproviderbasicauthpassword) | `string` | Required | | [username](#specdistributionmodulesauthproviderbasicauthusername) | `string` | Required | +### Description + +Configuration for the HTTP Basic Auth provider. + ## .spec.distribution.modules.auth.provider.basicAuth.password ### Description -The password for the basic auth +The password for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.basicAuth.username ### Description -The username for the basic auth +The username for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.type ### Description -The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** +The type of the Auth provider, options are: +- `none`: will disable authentication in the infrastructural ingresses. +- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them. +- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication. + +Default is `none`. ### Constraints @@ -930,6 +986,10 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** | [type](#specdistributionmodulesdrtype) | `string` | Required | | [velero](#specdistributionmodulesdrvelero) | `object` | Optional | +### Description + +Configuration for the Disaster Recovery module. 
+ ## .spec.distribution.modules.dr.overrides ### Properties @@ -940,13 +1000,17 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** | [nodeSelector](#specdistributionmodulesdroverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesdroverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.dr.overrides.ingresses ## .spec.distribution.modules.dr.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.dr.overrides.tolerations @@ -961,7 +1025,7 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.dr.overrides.tolerations.effect @@ -1002,7 +1066,9 @@ The value of the toleration ### Description -The type of the DR, must be ***none*** or ***on-premises*** +The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment. + +Default is `none`. ### Constraints @@ -1025,6 +1091,10 @@ The type of the DR, must be ***none*** or ***on-premises*** | [schedules](#specdistributionmodulesdrveleroschedules) | `object` | Optional | | [snapshotController](#specdistributionmodulesdrvelerosnapshotcontroller) | `object` | Optional | +### Description + +Configuration for the Velero package. + ## .spec.distribution.modules.dr.velero.backend ### Description @@ -1099,7 +1169,7 @@ The secret access key (password) for the external S3-compatible bucket. 
### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.dr.velero.overrides.tolerations @@ -1114,7 +1184,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.dr.velero.overrides.tolerations.effect @@ -1274,7 +1344,7 @@ Whether to install or not the snapshotController component in the cluster. Befor ### Description -the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone +The base domain used for all the KFD infrastructural ingresses. If using the nginx dual type, this value should be the same as the domain associated with the `internal` ingress class. ## .spec.distribution.modules.ingress.certManager @@ -1285,6 +1355,10 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati | [clusterIssuer](#specdistributionmodulesingresscertmanagerclusterissuer) | `object` | Required | | [overrides](#specdistributionmodulesingresscertmanageroverrides) | `object` | Optional | +### Description + +Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses. 
+ ## .spec.distribution.modules.ingress.certManager.clusterIssuer ### Properties @@ -1296,29 +1370,33 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati | [solvers](#specdistributionmodulesingresscertmanagerclusterissuersolvers) | `array` | Optional | | [type](#specdistributionmodulesingresscertmanagerclusterissuertype) | `string` | Optional | +### Description + +Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt. + ## .spec.distribution.modules.ingress.certManager.clusterIssuer.email ### Description -The email of the cluster issuer +The email address to use during the certificate issuing process. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.name ### Description -The name of the cluster issuer +Name of the clusterIssuer. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers ### Description -The custom solvers configurations +List of challenge solvers to use instead of the default one for the `http01` challenge. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.type ### Description -The type of the cluster issuer, must be ***http01*** +The type of the clusterIssuer. Only `http01` challenge is supported for KFDDistribution kind. See solvers for arbitrary configurations. ### Constraints @@ -1341,7 +1419,7 @@ The type of the cluster issuer, must be ***http01*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations @@ -1356,7 +1434,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.ingress.certManager.overrides.tolerations.effect @@ -1414,7 +1492,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations @@ -1429,7 +1507,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations.effect @@ -1478,7 +1556,7 @@ The value of the toleration ### Description -Configurations for the nginx ingress controller module +Configurations for the Ingress nginx controller package. ## .spec.distribution.modules.ingress.nginx.overrides @@ -1493,7 +1571,7 @@ Configurations for the nginx ingress controller module ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations @@ -1508,7 +1586,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations.effect @@ -1558,7 +1636,7 @@ The value of the toleration ### Description -The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret*** +The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`. 
### Constraints @@ -1580,21 +1658,38 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or ** | [cert](#specdistributionmodulesingressnginxtlssecretcert) | `string` | Required | | [key](#specdistributionmodulesingressnginxtlssecretkey) | `string` | Required | +### Description + +Kubernetes TLS secret for the ingresses TLS certificate. + ## .spec.distribution.modules.ingress.nginx.tls.secret.ca +### Description + +The Certificate Authority certificate file's content. You can use the `"{file://<path>}"` notation to get the content from a file. + ## .spec.distribution.modules.ingress.nginx.tls.secret.cert ### Description -The certificate file content or you can use the file notation to get the content from a file +The certificate file's content. You can use the `"{file://<path>}"` notation to get the content from a file. ## .spec.distribution.modules.ingress.nginx.tls.secret.key +### Description + +The signing key file's content. You can use the `"{file://<path>}"` notation to get the content from a file. + ## .spec.distribution.modules.ingress.nginx.type ### Description -The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual*** +The type of the Ingress nginx controller, options are: +- `none`: no ingress controller will be installed and no infrastructural ingresses will be created. +- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created. +- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type. + +Default is `single`. 
### Constraints @@ -1616,6 +1711,10 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** | [nodeSelector](#specdistributionmodulesingressoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesingressoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the Ingress module. + ## .spec.distribution.modules.ingress.overrides.ingresses ### Properties @@ -1638,25 +1737,25 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** ### Description -If true, the ingress will not have authentication +If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth. ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.host ### Description -The host of the ingress +Use this host for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.ingressClass ### Description -The ingress class of the ingress +Use this ingress class for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.nodeSelector ### Description -The node selector to use to place the pods for the ingress module +Set to override the node selector used to place the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations @@ -1671,7 +1770,7 @@ The node selector to use to place the pods for the ingress module ### Description -The tolerations that will be added to the pods for the ingress module +Set to override the tolerations that will be added to the pods of the Ingress module. 
## .spec.distribution.modules.ingress.overrides.tolerations.effect @@ -1723,6 +1822,10 @@ The value of the toleration | [overrides](#specdistributionmodulesloggingoverrides) | `object` | Optional | | [type](#specdistributionmodulesloggingtype) | `string` | Required | +### Description + +Configuration for the Logging module. + ## .spec.distribution.modules.logging.cerebro ### Properties @@ -1731,6 +1834,10 @@ The value of the toleration |:-------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingcerebrooverrides) | `object` | Optional | +### Description + +DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. + ## .spec.distribution.modules.logging.cerebro.overrides ### Properties @@ -1744,7 +1851,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.cerebro.overrides.tolerations @@ -1759,7 +1866,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.cerebro.overrides.tolerations.effect @@ -1813,55 +1920,55 @@ The value of the toleration ### Description -when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows. +When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows. ## .spec.distribution.modules.logging.customOutputs.audit ### Description -This value defines where the output from Flow will be sent. 
Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.errors ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.events ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.infra ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `infra` Flow will be sent. 
This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.ingressNginx ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.kubernetes ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdCommon ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdEtcd ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.loki @@ -1874,8 +1981,16 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional | | [tsdbStartDate](#specdistributionmoduleslogginglokitsdbstartdate) | `string` | Required | +### Description + +Configuration for the Loki package. + ## .spec.distribution.modules.logging.loki.backend +### Description + +The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -1897,35 +2012,39 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [insecure](#specdistributionmoduleslogginglokiexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmoduleslogginglokiexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Loki's external storage backend. 
+ ## .spec.distribution.modules.logging.loki.externalEndpoint.accessKeyId ### Description -The access key id of the loki external endpoint +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.externalEndpoint.bucketName ### Description -The bucket name of the loki external endpoint +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.endpoint ### Description -The endpoint of the loki external endpoint +External S3-compatible endpoint for Loki's storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.insecure ### Description -If true, the loki external endpoint will be insecure +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.logging.loki.externalEndpoint.secretAccessKey ### Description -The secret access key of the loki external endpoint +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.resources @@ -1949,13 +2068,13 @@ The secret access key of the loki external endpoint ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.loki.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.loki.resources.requests @@ -1970,13 +2089,13 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.loki.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.loki.tsdbStartDate @@ -1998,6 +2117,10 @@ Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). 
Example: `2024- | [rootUser](#specdistributionmodulesloggingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulesloggingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Logging's MinIO deployment. + ## .spec.distribution.modules.logging.minio.overrides ### Properties @@ -2011,7 +2134,7 @@ Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024- ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations @@ -2026,7 +2149,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations.effect @@ -2076,19 +2199,19 @@ The value of the toleration ### Description -The password of the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.logging.minio.rootUser.username ### Description -The username of the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.logging.minio.storageSize ### Description -The PVC size for each minio disk, 6 disks total +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.logging.opensearch @@ -2114,7 +2237,7 @@ The PVC size for each minio disk, 6 disks total ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.logging.opensearch.overrides.tolerations @@ -2129,7 +2252,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations.effect @@ -2188,13 +2311,13 @@ The value of the toleration ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.opensearch.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.opensearch.resources.requests @@ -2209,25 +2332,25 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.opensearch.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.opensearch.storageSize ### Description -The storage size for the opensearch pods +The storage size for the OpenSearch volumes. ## .spec.distribution.modules.logging.opensearch.type ### Description -The type of the opensearch, must be ***single*** or ***triple*** +The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment. ### Constraints @@ -2246,6 +2369,10 @@ The type of the opensearch, must be ***single*** or ***triple*** |:--------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingoperatoroverrides) | `object` | Optional | +### Description + +Configuration for the Logging Operator. 
+ ## .spec.distribution.modules.logging.operator.overrides ### Properties @@ -2259,7 +2386,7 @@ The type of the opensearch, must be ***single*** or ***triple*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.operator.overrides.tolerations @@ -2274,7 +2401,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.operator.overrides.tolerations.effect @@ -2321,13 +2448,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesloggingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesloggingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.logging.overrides.ingresses ## .spec.distribution.modules.logging.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.logging.overrides.tolerations @@ -2342,7 +2473,7 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.logging.overrides.tolerations.effect @@ -2383,7 +2514,13 @@ The value of the toleration ### Description -selects the logging stack. Choosing none will disable the centralized logging. 
Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. +Selects the logging stack. Options are: +- `none`: will disable the centralized logging. +- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. +- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage. +- `customOutputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. + +Default is `opensearch`. ### Constraints @@ -2416,7 +2553,7 @@ selects the logging stack. Choosing none will disable the centralized logging. C ### Description -configuration for the Monitoring module components +Configuration for the Monitoring module. ## .spec.distribution.modules.monitoring.alertmanager ### Properties @@ -2432,19 +2569,19 @@ configuration for the Monitoring module components ### Description -The webhook url to send deadman switch monitoring, for example to use with healthchecks.io +The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io. ## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules ### Description -If true, the default rules will be installed +Set to false to avoid installing the Prometheus rules (alerts) included with the distribution. ## .spec.distribution.modules.monitoring.alertmanager.slackWebhookUrl ### Description -The slack webhook url to send alerts +The Slack webhook URL where to send the infrastructural and workload alerts to. 
## .spec.distribution.modules.monitoring.blackboxExporter @@ -2467,7 +2604,7 @@ The slack webhook url to send alerts ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations @@ -2482,7 +2619,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations.effect @@ -2550,7 +2687,7 @@ Notice that by default anonymous access is enabled. ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations @@ -2565,7 +2702,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations.effect @@ -2635,7 +2772,7 @@ More details in [Grafana's documentation](https://grafana.com/docs/grafana/lates ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations @@ -2650,7 +2787,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations.effect @@ -2698,11 +2835,15 @@ The value of the toleration | [overrides](#specdistributionmodulesmonitoringmimiroverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulesmonitoringmimirretentiontime) | `string` | Optional | +### Description + +Configuration for the Mimir package. + ## .spec.distribution.modules.monitoring.mimir.backend ### Description -The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. ### Constraints @@ -2725,35 +2866,39 @@ The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulesmonitoringmimirexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulesmonitoringmimirexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Mimir's external storage backend. + ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.accessKeyId ### Description -The access key id of the external mimir backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.bucketName ### Description -The bucket name of the external mimir backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.endpoint ### Description -The endpoint of the external mimir backend +External S3-compatible endpoint for Mimir's storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure ### Description -If true, the external mimir backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. 
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.secretAccessKey ### Description -The secret access key of the external mimir backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.overrides @@ -2768,7 +2913,7 @@ The secret access key of the external mimir backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.mimir.overrides.tolerations @@ -2783,7 +2928,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.mimir.overrides.tolerations.effect @@ -2824,7 +2969,7 @@ The value of the toleration ### Description -The retention time for the mimir pods +The retention time for the logs stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days. ## .spec.distribution.modules.monitoring.minio @@ -2836,6 +2981,10 @@ The retention time for the mimir pods | [rootUser](#specdistributionmodulesmonitoringminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulesmonitoringminiostoragesize) | `string` | Optional | +### Description + +Configuration for Monitoring's MinIO deployment. + ## .spec.distribution.modules.monitoring.minio.overrides ### Properties @@ -2849,7 +2998,7 @@ The retention time for the mimir pods ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.monitoring.minio.overrides.tolerations @@ -2864,7 +3013,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.minio.overrides.tolerations.effect @@ -2914,19 +3063,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.monitoring.overrides @@ -2938,13 +3087,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulesmonitoringoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesmonitoringoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.monitoring.overrides.ingresses ## .spec.distribution.modules.monitoring.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations @@ -2959,7 +3112,7 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. 
## .spec.distribution.modules.monitoring.overrides.tolerations.effect @@ -3038,13 +3191,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests @@ -3059,31 +3212,31 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.prometheus.retentionSize ### Description -The retention size for the k8s Prometheus instance. +The retention size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.retentionTime ### Description -The retention time for the K8s Prometheus instance. +The retention time for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.storageSize ### Description -The storage size for the k8s Prometheus instance. +The storage size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheusAgent @@ -3124,13 +3277,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. 
## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests @@ -3145,24 +3298,26 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.type ### Description -The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***. +The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`. - `none`: will disable the whole monitoring stack. - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. -- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. +- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. + +Default is `prometheus`. ### Constraints @@ -3196,7 +3351,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations @@ -3211,7 +3366,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations.effect @@ -3259,6 +3414,10 @@ The value of the toleration | [tigeraOperator](#specdistributionmodulesnetworkingtigeraoperator) | `object` | Optional | | [type](#specdistributionmodulesnetworkingtype) | `string` | Required | +### Description + +Configuration for the Networking module. + ## .spec.distribution.modules.networking.cilium ### Properties @@ -3271,6 +3430,10 @@ The value of the toleration ## .spec.distribution.modules.networking.cilium.maskSize +### Description + +The mask size to use for the Pods network on each node. + ## .spec.distribution.modules.networking.cilium.overrides ### Properties @@ -3284,7 +3447,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.networking.cilium.overrides.tolerations @@ -3299,7 +3462,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.networking.cilium.overrides.tolerations.effect @@ -3338,6 +3501,10 @@ The value of the toleration ## .spec.distribution.modules.networking.cilium.podCidr +### Description + +Allows specifying a CIDR for the Pods network different from `.spec.kubernetes.podCidr`. If not set the default is to use `.spec.kubernetes.podCidr`.
+ ### Constraints **pattern**: the string must match the following regular expression: @@ -3358,13 +3525,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesnetworkingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesnetworkingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.networking.overrides.ingresses ## .spec.distribution.modules.networking.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.networking.overrides.tolerations @@ -3379,7 +3550,7 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.networking.overrides.tolerations.effect @@ -3437,7 +3608,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations @@ -3452,7 +3623,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations.effect @@ -3493,7 +3664,7 @@ The value of the toleration ### Description -The type of networking to use, either ***none***, ***calico*** or ***cilium*** +The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or `cilium`. 
### Constraints @@ -3516,6 +3687,10 @@ The type of networking to use, either ***none***, ***calico*** or ***cilium*** | [overrides](#specdistributionmodulespolicyoverrides) | `object` | Optional | | [type](#specdistributionmodulespolicytype) | `string` | Required | +### Description + +Configuration for the Policy module. + ## .spec.distribution.modules.policy.gatekeeper ### Properties @@ -3527,6 +3702,10 @@ The type of networking to use, either ***none***, ***calico*** or ***cilium*** | [installDefaultPolicies](#specdistributionmodulespolicygatekeeperinstalldefaultpolicies) | `boolean` | Required | | [overrides](#specdistributionmodulespolicygatekeeperoverrides) | `object` | Optional | +### Description + +Configuration for the Gatekeeper package. + ## .spec.distribution.modules.policy.gatekeeper.additionalExcludedNamespaces ### Description @@ -3537,7 +3716,7 @@ This parameter adds namespaces to Gatekeeper's exemption list, so it will not en ### Description -The enforcement action to use for the gatekeeper module +The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations. ### Constraints @@ -3553,7 +3732,7 @@ The enforcement action to use for the gatekeeper module ### Description -If true, the default policies will be installed +Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution. ## .spec.distribution.modules.policy.gatekeeper.overrides @@ -3568,7 +3747,7 @@ If true, the default policies will be installed ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations @@ -3583,7 +3762,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations.effect @@ -3631,17 +3810,21 @@ The value of the toleration | [overrides](#specdistributionmodulespolicykyvernooverrides) | `object` | Optional | | [validationFailureAction](#specdistributionmodulespolicykyvernovalidationfailureaction) | `string` | Required | +### Description + +Configuration for the Kyverno package. + ## .spec.distribution.modules.policy.kyverno.additionalExcludedNamespaces ### Description -This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them. +This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them. ## .spec.distribution.modules.policy.kyverno.installDefaultPolicies ### Description -If true, the default policies will be installed +Set to `false` to avoid installing the default Kyverno policies included with the distribution. ## .spec.distribution.modules.policy.kyverno.overrides @@ -3656,7 +3839,7 @@ If true, the default policies will be installed ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.policy.kyverno.overrides.tolerations @@ -3671,7 +3854,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.policy.kyverno.overrides.tolerations.effect @@ -3712,7 +3895,7 @@ The value of the toleration ### Description -The validation failure action to use for the kyverno module +The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies. ### Constraints @@ -3733,13 +3916,17 @@ The validation failure action to use for the kyverno module | [nodeSelector](#specdistributionmodulespolicyoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulespolicyoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.policy.overrides.ingresses ## .spec.distribution.modules.policy.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations @@ -3754,7 +3941,7 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations.effect @@ -3795,7 +3982,9 @@ The value of the toleration ### Description -The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno*** +The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. + +Default is `none`. 
### Constraints @@ -3818,6 +4007,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [tempo](#specdistributionmodulestracingtempo) | `object` | Optional | | [type](#specdistributionmodulestracingtype) | `string` | Required | +### Description + +Configuration for the Tracing module. + ## .spec.distribution.modules.tracing.minio ### Properties @@ -3828,6 +4021,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [rootUser](#specdistributionmodulestracingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulestracingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Tracing's MinIO deployment. + ## .spec.distribution.modules.tracing.minio.overrides ### Properties @@ -3841,7 +4038,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations @@ -3856,7 +4053,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations.effect @@ -3906,19 +4103,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. 
## .spec.distribution.modules.tracing.overrides @@ -3930,13 +4127,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulestracingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulestracingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.tracing.overrides.ingresses ## .spec.distribution.modules.tracing.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations @@ -3951,7 +4152,7 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations.effect @@ -3999,11 +4200,15 @@ The value of the toleration | [overrides](#specdistributionmodulestracingtempooverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulestracingtemporetentiontime) | `string` | Optional | +### Description + +Configuration for the Tempo package. + ## .spec.distribution.modules.tracing.tempo.backend ### Description -The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. 
### Constraints @@ -4026,35 +4231,39 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulestracingtempoexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulestracingtempoexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Tempo's external storage backend. + ## .spec.distribution.modules.tracing.tempo.externalEndpoint.accessKeyId ### Description -The access key id of the external tempo backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.bucketName ### Description -The bucket name of the external tempo backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.endpoint ### Description -The endpoint of the external tempo backend +External S3-compatible endpoint for Tempo's storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure ### Description -If true, the external tempo backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.secretAccessKey ### Description -The secret access key of the external tempo backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.overrides @@ -4069,7 +4278,7 @@ The secret access key of the external tempo backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.tracing.tempo.overrides.tolerations @@ -4084,7 +4293,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.tempo.overrides.tolerations.effect @@ -4125,13 +4334,15 @@ The value of the toleration ### Description -The retention time for the tempo pods +The retention time for the traces stored in Tempo. ## .spec.distribution.modules.tracing.type ### Description -The type of tracing to use, either ***none*** or ***tempo*** +The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment. + +Default is `tempo`. ### Constraints @@ -4144,6 +4355,10 @@ The type of tracing to use, either ***none*** or ***tempo*** ## .spec.distributionVersion +### Description + +Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`. + ### Constraints **minimum length**: the minimum number of characters for this string is: `1` diff --git a/docs/schemas/onpremises-kfd-v1alpha2.md b/docs/schemas/onpremises-kfd-v1alpha2.md index 67cfd9844..f3b0f827a 100644 --- a/docs/schemas/onpremises-kfd-v1alpha2.md +++ b/docs/schemas/onpremises-kfd-v1alpha2.md @@ -2,8 +2,14 @@ This document explains the full schema for the `kind: OnPremises` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules and cluster on premises. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/onpremises-kfd-v1alpha2.yaml.tpl). 
+An example configuration file can be created by running the following command: +```bash +furyctl create config --kind OnPremises --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. ## Properties | Property | Type | Required | @@ -13,6 +19,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio | [metadata](#metadata) | `object` | Required | | [spec](#spec) | `object` | Required | +### Description + +A KFD Cluster deployed on top of a set of existing VMs. + ## .apiVersion ### Constraints @@ -103,7 +113,7 @@ EXPERIMENTAL FEATURE. This field defines whether Network Policies are provided f ### Description -The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra` +The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. ## .spec.distribution.common.provider @@ -125,6 +135,8 @@ The provider type. Don't set. FOR INTERNAL USE ONLY. URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`). +NOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too. + ## .spec.distribution.common.relativeVendorPath ### Description @@ -1077,6 +1089,8 @@ The type of the Auth provider, options are: - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them. - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication. +Default is `none`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -1179,6 +1193,8 @@ The value of the toleration The type of the Disaster Recovery, must be `none` or `on-premises`. 
`none` disables the module and `on-premises` will install Velero and an optional MinIO deployment. +Default is `none`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -1494,7 +1510,7 @@ The email address to use during the certificate issuing process. ### Description -Name of the clusterIssuer +Name of the clusterIssuer. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers @@ -1666,7 +1682,7 @@ The value of the toleration ### Description -Configurations for the nginx ingress controller package. +Configurations for the Ingress nginx controller package. ## .spec.distribution.modules.ingress.nginx.overrides @@ -1794,11 +1810,13 @@ The signing key file's content. You can use the `"{file://}"` notation to ### Description -The type of the nginx ingress controller, options are: +The type of the Ingress nginx controller, options are: - `none`: no ingress controller will be installed and no infrastructural ingresses will be created. - `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created. - `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type. +Default is `single`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -1863,7 +1881,7 @@ Use this ingress class for the ingress instead of the default one. ### Description -Set to override the node selector used to place the pods of the Ingress module +Set to override the node selector used to place the pods of the Ingress module. 
## .spec.distribution.modules.ingress.overrides.tolerations @@ -1878,7 +1896,7 @@ Set to override the node selector used to place the pods of the Ingress module ### Description -Set to override the tolerations that will be added to the pods of the Ingress module +Set to override the tolerations that will be added to the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations.effect @@ -1946,7 +1964,7 @@ Configuration for the Logging module. ### Description -DEPRECATED in latest versions of KFD. +DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. ## .spec.distribution.modules.logging.cerebro.overrides @@ -2178,13 +2196,13 @@ The secret access key (password) for the external S3-compatible bucket. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.loki.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.loki.resources.requests @@ -2199,13 +2217,13 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.loki.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.loki.tsdbStartDate @@ -2421,13 +2439,13 @@ The value of the toleration ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.opensearch.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. 
## .spec.distribution.modules.logging.opensearch.resources.requests @@ -2442,13 +2460,13 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.opensearch.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.opensearch.storageSize @@ -2630,6 +2648,8 @@ Selects the logging stack. Options are: - `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage. - `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. +Default is `opensearch`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -2677,7 +2697,7 @@ Configuration for the Monitoring module. ### Description -The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io +The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io. ## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules @@ -3299,13 +3319,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests @@ -3320,13 +3340,13 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. 
## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.prometheus.retentionSize @@ -3385,13 +3405,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests @@ -3406,13 +3426,13 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.type @@ -3425,6 +3445,8 @@ The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or ` - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. +Default is `prometheus`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -4089,6 +4111,8 @@ The value of the toleration The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. 
+Default is `none`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -4445,6 +4469,8 @@ The retention time for the traces stored in Tempo. The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment. +Default is `tempo`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -4458,7 +4484,7 @@ The type of tracing to use, either `none` or `tempo`. `none` will disable the Tr ### Description -Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: v1.30.1. +Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`. ### Constraints diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index 596d9060d..b4117edb8 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -73,23 +73,28 @@ type SpecDistribution struct { } type SpecDistributionCommon struct { - // The node selector to use to place the pods for all the KFD modules + // The node selector to use to place the pods for all the KFD modules. Follows + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` // URL of the registry where to pull images from for the Distribution phase. - // (Default is registry.sighup.io/fury). 
- // - // NOTE: If plugins are pulling from the default registry, the registry will be - // replaced for these plugins too. + // (Default is `registry.sighup.io/fury`). Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - // The relative path to the vendor directory, does not need to be changed + // The relative path to the vendor directory, does not need to be changed. RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` - // The tolerations that will be added to the pods for all the KFD modules + // An array with the tolerations that will be added to the pods for all the KFD + // modules. Follows Kubernetes tolerations format. Example: + // + // ```yaml + // - effect: NoSchedule + // key: node.kubernetes.io/role + // value: infra + // ``` Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } @@ -123,45 +128,29 @@ type SpecDistributionCustomPatchesConfigMapGeneratorResource struct { Options *SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` } -type SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior string - -const ( - SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "create" - SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "merge" - SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "replace" -) - -type SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions struct { - // The annotations of the configmap - Annotations TypesKubeLabels `json:"annotations,omitempty" 
yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` - - // If true, the name suffix hash will be disabled - DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` - - // If true, the configmap will be immutable - Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` - - // The labels of the configmap - Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + return nil } +type SpecDistributionCustomPatchesConfigMapGenerator []SpecDistributionCustomPatchesConfigMapGeneratorResource + // Each entry should follow the format of Kustomize's images patch type SpecDistributionCustomPatchesImages []map[string]interface{} -type SpecDistributionCustomPatchesPatch struct { - // Options corresponds to the JSON schema field "options". 
- Options *SpecDistributionCustomPatchesPatchOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` - - // The patch content - Patch *string `json:"patch,omitempty" yaml:"patch,omitempty" mapstructure:"patch,omitempty"` - - // The path of the patch - Path *string `json:"path,omitempty" yaml:"path,omitempty" mapstructure:"path,omitempty"` - - // Target corresponds to the JSON schema field "target". - Target *SpecDistributionCustomPatchesPatchTarget `json:"target,omitempty" yaml:"target,omitempty" mapstructure:"target,omitempty"` -} - type SpecDistributionCustomPatchesPatchOptions struct { // If true, the kind change will be allowed AllowKindChange *bool `json:"allowKindChange,omitempty" yaml:"allowKindChange,omitempty" mapstructure:"allowKindChange,omitempty"` @@ -193,13 +182,73 @@ type SpecDistributionCustomPatchesPatchTarget struct { Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` } +type SpecDistributionCustomPatchesPatch struct { + // Options corresponds to the JSON schema field "options". + Options *SpecDistributionCustomPatchesPatchOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` + + // The patch content + Patch *string `json:"patch,omitempty" yaml:"patch,omitempty" mapstructure:"patch,omitempty"` + + // The path of the patch + Path *string `json:"path,omitempty" yaml:"path,omitempty" mapstructure:"path,omitempty"` + + // Target corresponds to the JSON schema field "target". 
+ Target *SpecDistributionCustomPatchesPatchTarget `json:"target,omitempty" yaml:"target,omitempty" mapstructure:"target,omitempty"` +} + type SpecDistributionCustomPatchesPatches []SpecDistributionCustomPatchesPatch // Each entry should be either a relative file path or an inline content resolving // to a partial or complete resource definition type SpecDistributionCustomPatchesPatchesStrategicMerge []string -type SpecDistributionCustomPatchesSecretGenerator []SpecDistributionCustomPatchesSecretGeneratorResource +type SpecDistributionCustomPatchesSecretGeneratorResourceBehavior string + +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + return nil +} + +const ( + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "create" + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "replace" + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "merge" +) + +type SpecDistributionCustomPatchesSecretGeneratorResourceOptions struct { + // The annotations of the secret + Annotations TypesKubeLabels `json:"annotations,omitempty" 
yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` + + // If true, the name suffix hash will be disabled + DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` + + // If true, the secret will be immutable + Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` + + // The labels of the secret + Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +} type SpecDistributionCustomPatchesSecretGeneratorResource struct { // The behavior of the secret @@ -227,28 +276,26 @@ type SpecDistributionCustomPatchesSecretGeneratorResource struct { Type *string `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } -type SpecDistributionCustomPatchesSecretGeneratorResourceBehavior string - -const ( - SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "create" - SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "merge" - SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "replace" -) - -type SpecDistributionCustomPatchesSecretGeneratorResourceOptions struct { - // The annotations of the secret - Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` - - // If true, the name suffix hash will be disabled - DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` - - // If true, the secret will be immutable - Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` - - // The labels of the secret - 
Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesSecretGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + return nil } +type SpecDistributionCustomPatchesSecretGenerator []SpecDistributionCustomPatchesSecretGeneratorResource + type SpecDistributionCustompatches struct { // ConfigMapGenerator corresponds to the JSON schema field "configMapGenerator". ConfigMapGenerator SpecDistributionCustomPatchesConfigMapGenerator `json:"configMapGenerator,omitempty" yaml:"configMapGenerator,omitempty" mapstructure:"configMapGenerator,omitempty"` @@ -267,12 +314,9 @@ type SpecDistributionCustompatches struct { SecretGenerator SpecDistributionCustomPatchesSecretGenerator `json:"secretGenerator,omitempty" yaml:"secretGenerator,omitempty" mapstructure:"secretGenerator,omitempty"` } -type SpecDistributionModules struct { - // Auth corresponds to the JSON schema field "auth". - Auth *SpecDistributionModulesAuth `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"` - - // Aws corresponds to the JSON schema field "aws". - Aws *SpecDistributionModulesAws `json:"aws,omitempty" yaml:"aws,omitempty" mapstructure:"aws,omitempty"` +type SpecDistributionModulesAuthDexExpiry struct { + // Dex ID tokens expiration time duration (default 24h). 
+ IdTokens *string `json:"idTokens,omitempty" yaml:"idTokens,omitempty" mapstructure:"idTokens,omitempty"`

 // Dr corresponds to the JSON schema field "dr".
 Dr SpecDistributionModulesDr `json:"dr" yaml:"dr" mapstructure:"dr"`

@@ -314,10 +358,23 @@ type SpecDistributionModulesAuth struct {
 }
 
 type SpecDistributionModulesAuthDex struct {
- // The additional static clients for dex
+ // Additional static clients definitions that will be added to the default clients
+ // included with the distribution in Dex's configuration. Example:
+ //
+ // ```yaml
+ // additionalStaticClients:
+ // - id: my-custom-client
+ // name: "A custom additional static client"
+ // redirectURIs:
+ // - "https://myapp.tld/redirect"
+ // - "https://alias.tld/oidc-callback"
+ // secret: supersecretpassword
+ // ```
+ // Reference: https://dexidp.io/docs/connectors/local/
 AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"`

- // The connectors for dex
+ // A list with each item defining a Dex connector. Follows Dex connectors
+ // configuration format: https://dexidp.io/docs/connectors/
 Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"`

 // Expiry corresponds to the JSON schema field "expiry".
@@ -347,162 +404,296 @@ type SpecDistributionModulesAuthOverrides struct {
 }

 type SpecDistributionModulesAuthOverridesIngress struct {
- // The host of the ingress
+ // Use this host for the ingress instead of the default one.
 Host string `json:"host" yaml:"host" mapstructure:"host"`

- // The ingress class of the ingress
+ // Use this ingress class for the ingress instead of the default one.
IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress -type SpecDistributionModulesAuthPomerium interface{} - -// override default routes for KFD components -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy struct { - // GatekeeperPolicyManager corresponds to the JSON schema field - // "gatekeeperPolicyManager". - GatekeeperPolicyManager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem `json:"gatekeeperPolicyManager,omitempty" yaml:"gatekeeperPolicyManager,omitempty" mapstructure:"gatekeeperPolicyManager,omitempty"` - - // HubbleUi corresponds to the JSON schema field "hubbleUi". - HubbleUi []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem `json:"hubbleUi,omitempty" yaml:"hubbleUi,omitempty" mapstructure:"hubbleUi,omitempty"` - - // IngressNgnixForecastle corresponds to the JSON schema field - // "ingressNgnixForecastle". - IngressNgnixForecastle []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem `json:"ingressNgnixForecastle,omitempty" yaml:"ingressNgnixForecastle,omitempty" mapstructure:"ingressNgnixForecastle,omitempty"` - - // LoggingMinioConsole corresponds to the JSON schema field "loggingMinioConsole". - LoggingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem `json:"loggingMinioConsole,omitempty" yaml:"loggingMinioConsole,omitempty" mapstructure:"loggingMinioConsole,omitempty"` - - // LoggingOpensearchDashboards corresponds to the JSON schema field - // "loggingOpensearchDashboards". 
- LoggingOpensearchDashboards []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem `json:"loggingOpensearchDashboards,omitempty" yaml:"loggingOpensearchDashboards,omitempty" mapstructure:"loggingOpensearchDashboards,omitempty"` - - // MonitoringAlertmanager corresponds to the JSON schema field - // "monitoringAlertmanager". - MonitoringAlertmanager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem `json:"monitoringAlertmanager,omitempty" yaml:"monitoringAlertmanager,omitempty" mapstructure:"monitoringAlertmanager,omitempty"` - - // MonitoringGrafana corresponds to the JSON schema field "monitoringGrafana". - MonitoringGrafana []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem `json:"monitoringGrafana,omitempty" yaml:"monitoringGrafana,omitempty" mapstructure:"monitoringGrafana,omitempty"` - - // MonitoringMinioConsole corresponds to the JSON schema field - // "monitoringMinioConsole". - MonitoringMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem `json:"monitoringMinioConsole,omitempty" yaml:"monitoringMinioConsole,omitempty" mapstructure:"monitoringMinioConsole,omitempty"` +// Override the common configuration with a particular configuration for the Auth +// module. +type SpecDistributionModulesAuthOverrides struct { + // Override the definition of the Auth module ingresses. + Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // MonitoringPrometheus corresponds to the JSON schema field - // "monitoringPrometheus". - MonitoringPrometheus []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem `json:"monitoringPrometheus,omitempty" yaml:"monitoringPrometheus,omitempty" mapstructure:"monitoringPrometheus,omitempty"` + // Set to override the node selector used to place the pods of the Auth module. 
+ NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // TracingMinioConsole corresponds to the JSON schema field "tracingMinioConsole". - TracingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem `json:"tracingMinioConsole,omitempty" yaml:"tracingMinioConsole,omitempty" mapstructure:"tracingMinioConsole,omitempty"` + // Set to override the tolerations that will be added to the pods of the Auth + // module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumRoutesElem map[string]interface{} +type SpecDistributionModulesAuthPomerium interface{} -// Pomerium needs some user-provided secrets to be fully configured. 
These secrets -// should be unique between clusters. -type SpecDistributionModulesAuthPomeriumSecrets struct { - // Cookie Secret is the secret used to encrypt and sign session cookies. - // - // To generate a random key, run the following command: `head -c32 /dev/urandom | - // base64` - COOKIESECRET string `json:"COOKIE_SECRET" yaml:"COOKIE_SECRET" mapstructure:"COOKIE_SECRET"` - - // Identity Provider Client Secret is the OAuth 2.0 Secret Identifier. When auth - // type is SSO, this value will be the secret used to authenticate Pomerium with - // Dex, **use a strong random value**. - IDPCLIENTSECRET string `json:"IDP_CLIENT_SECRET" yaml:"IDP_CLIENT_SECRET" mapstructure:"IDP_CLIENT_SECRET"` - - // Shared Secret is the base64-encoded, 256-bit key used to mutually authenticate - // requests between Pomerium services. It's critical that secret keys are random, - // and stored safely. - // - // To generate a key, run the following command: `head -c32 /dev/urandom | base64` - SHAREDSECRET string `json:"SHARED_SECRET" yaml:"SHARED_SECRET" mapstructure:"SHARED_SECRET"` +// Configuration for the HTTP Basic Auth provider. +type SpecDistributionModulesAuthProviderBasicAuth struct { + // The password for logging in with the HTTP basic authentication. + Password string `json:"password" yaml:"password" mapstructure:"password"` - // Signing Key is the base64 representation of one or more PEM-encoded private - // keys used to sign a user's attestation JWT, which can be consumed by upstream - // applications to pass along identifying user information like username, id, and - // groups. - // - // To generates an P-256 (ES256) signing key: - // - // ```bash - // openssl ecparam -genkey -name prime256v1 -noout -out ec_private.pem - // # careful! 
this will output your private key in terminal - // cat ec_private.pem | base64 - // ``` - SIGNINGKEY string `json:"SIGNING_KEY" yaml:"SIGNING_KEY" mapstructure:"SIGNING_KEY"` + // The username for logging in with the HTTP basic authentication. + Username string `json:"username" yaml:"username" mapstructure:"username"` } -// Configuration for Pomerium, an identity-aware reverse proxy used for SSO. -type SpecDistributionModulesAuthPomerium_2 struct { - // DefaultRoutesPolicy corresponds to the JSON schema field "defaultRoutesPolicy". - DefaultRoutesPolicy *SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy `json:"defaultRoutesPolicy,omitempty" yaml:"defaultRoutesPolicy,omitempty" mapstructure:"defaultRoutesPolicy,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides_1 `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // DEPRECATED: Use defaultRoutesPolicy and/or routes - Policy *string `json:"policy,omitempty" yaml:"policy,omitempty" mapstructure:"policy,omitempty"` - - // Additional routes configuration for Pomerium. Follows Pomerium's route format: - // https://www.pomerium.com/docs/reference/routes - Routes []SpecDistributionModulesAuthPomeriumRoutesElem `json:"routes,omitempty" yaml:"routes,omitempty" mapstructure:"routes,omitempty"` - - // Secrets corresponds to the JSON schema field "secrets". - Secrets SpecDistributionModulesAuthPomeriumSecrets `json:"secrets" yaml:"secrets" mapstructure:"secrets"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + return nil } -type SpecDistributionModulesAuthProvider struct { - // BasicAuth corresponds to the JSON schema field "basicAuth". - BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` +type SpecDistributionModulesAuthProviderType string - // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** - Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", } -type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth - Password string `json:"password" yaml:"password" mapstructure:"password"` - - // The username for the basic auth - Username string `json:"username" yaml:"username" mapstructure:"username"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + } + *j = SpecDistributionModulesAuthProviderType(v) + return nil } -type SpecDistributionModulesAuthProviderType string - const ( - SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth" SpecDistributionModulesAuthProviderTypeNone SpecDistributionModulesAuthProviderType = "none" + SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth" SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" ) -type SpecDistributionModulesAws struct { - // ClusterAutoscaler corresponds to the JSON schema field "clusterAutoscaler". - ClusterAutoscaler SpecDistributionModulesAwsClusterAutoscaler `json:"clusterAutoscaler" yaml:"clusterAutoscaler" mapstructure:"clusterAutoscaler"` - - // EbsCsiDriver corresponds to the JSON schema field "ebsCsiDriver". +type SpecDistributionModulesAuthProvider struct { + // BasicAuth corresponds to the JSON schema field "basicAuth". + BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` + + // The type of the Auth provider, options are: + // - `none`: will disable authentication in the infrastructural ingresses. + // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) + // and require authentication before accessing them. + // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth + // (username and password) authentication. 
+ // + // Default is `none`. + Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + } + type Plain SpecDistributionModulesAuthProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProvider(plain) + return nil +} + +// Configuration for the Auth module. +type SpecDistributionModulesAuth struct { + // Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, + // Dex). Notice that when nginx type is dual, these will use the `external` + // ingress class. + BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` + + // Dex corresponds to the JSON schema field "dex". + Dex *SpecDistributionModulesAuthDex `json:"dex,omitempty" yaml:"dex,omitempty" mapstructure:"dex,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *SpecDistributionModulesAuthOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Pomerium corresponds to the JSON schema field "pomerium". + Pomerium SpecDistributionModulesAuthPomerium `json:"pomerium,omitempty" yaml:"pomerium,omitempty" mapstructure:"pomerium,omitempty"` + + // Provider corresponds to the JSON schema field "provider". + Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + } + type Plain SpecDistributionModulesAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuth(plain) + return nil +} + +type TypesAwsArn string + +type TypesAwsIamRoleName string + +type TypesFuryModuleComponentOverridesWithIAMRoleName struct { + // IamRoleName corresponds to the JSON schema field "iamRoleName". + IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` + + // The node selector to use to place the pods for the load balancer controller + // module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cluster autoscaler + // module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionModulesAwsClusterAutoscaler struct { + // IamRoleArn corresponds to the JSON schema field "iamRoleArn". + IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAwsClusterAutoscaler) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsClusterAutoscaler: required") + } + type Plain SpecDistributionModulesAwsClusterAutoscaler + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAwsClusterAutoscaler(plain) + return nil +} + +type SpecDistributionModulesAwsEbsCsiDriver struct { + // IamRoleArn corresponds to the JSON schema field "iamRoleArn". + IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAwsEbsCsiDriver) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsEbsCsiDriver: required") + } + type Plain SpecDistributionModulesAwsEbsCsiDriver + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAwsEbsCsiDriver(plain) + return nil +} + +type SpecDistributionModulesAwsEbsSnapshotController struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesAwsLoadBalancerController struct { + // IamRoleArn corresponds to the JSON schema field "iamRoleArn". 
+ IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAwsLoadBalancerController) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsLoadBalancerController: required") + } + type Plain SpecDistributionModulesAwsLoadBalancerController + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAwsLoadBalancerController(plain) + return nil +} + +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication even if + // `.spec.modules.auth.provider.type` is SSO or Basic Auth. 
+ DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** + Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesAuthProviderBasicAuth struct { + // The password for the basic auth + Password string `json:"password" yaml:"password" mapstructure:"password"` + + // The username for the basic auth + Username string `json:"username" yaml:"username" mapstructure:"username"` +} + +type SpecDistributionModulesAuthProviderType string + +const ( + SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth" + SpecDistributionModulesAuthProviderTypeNone SpecDistributionModulesAuthProviderType = "none" + SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" +) + +type SpecDistributionModulesAws struct { + // ClusterAutoscaler corresponds to the JSON schema field "clusterAutoscaler". + ClusterAutoscaler SpecDistributionModulesAwsClusterAutoscaler `json:"clusterAutoscaler" yaml:"clusterAutoscaler" mapstructure:"clusterAutoscaler"` + + // EbsCsiDriver corresponds to the JSON schema field "ebsCsiDriver". EbsCsiDriver SpecDistributionModulesAwsEbsCsiDriver `json:"ebsCsiDriver" yaml:"ebsCsiDriver" mapstructure:"ebsCsiDriver"` // EbsSnapshotController corresponds to the JSON schema field @@ -560,29 +751,112 @@ type SpecDistributionModulesDr struct { type SpecDistributionModulesDrType string const ( - SpecDistributionModulesDrTypeEks SpecDistributionModulesDrType = "eks" SpecDistributionModulesDrTypeNone SpecDistributionModulesDrType = "none" + SpecDistributionModulesDrTypeEks SpecDistributionModulesDrType = "eks" ) -type SpecDistributionModulesDrVelero struct { - // Eks corresponds to the JSON schema field "eks". 
- Eks SpecDistributionModulesDrVeleroEks `json:"eks" yaml:"eks" mapstructure:"eks"` +type TypesAwsS3BucketName string - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +type TypesAwsRegion string - // Configuration for Velero's backup schedules. - Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"` +var enumValues_TypesAwsRegion = []interface{}{ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-south-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-4", + "ca-central-1", + "eu-central-1", + "eu-central-2", + "eu-north-1", + "eu-south-1", + "eu-south-2", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-central-1", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", } +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesAwsRegion { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + } + *j = TypesAwsRegion(v) + return nil +} + +const TypesAwsRegionAfSouth1 TypesAwsRegion = "af-south-1" + +type Metadata struct { + // The name of the cluster. It will also be used as a prefix for all the other + // resources created. 
+ Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +const ( + TypesAwsRegionApNortheast1 TypesAwsRegion = "ap-northeast-1" + TypesAwsRegionApNortheast2 TypesAwsRegion = "ap-northeast-2" + TypesAwsRegionApNortheast3 TypesAwsRegion = "ap-northeast-3" + TypesAwsRegionApSouth1 TypesAwsRegion = "ap-south-1" + TypesAwsRegionApSouth2 TypesAwsRegion = "ap-south-2" + TypesAwsRegionApSoutheast1 TypesAwsRegion = "ap-southeast-1" + TypesAwsRegionApSoutheast2 TypesAwsRegion = "ap-southeast-2" + TypesAwsRegionApSoutheast3 TypesAwsRegion = "ap-southeast-3" + TypesAwsRegionApSoutheast4 TypesAwsRegion = "ap-southeast-4" + TypesAwsRegionCaCentral1 TypesAwsRegion = "ca-central-1" + TypesAwsRegionEuCentral1 TypesAwsRegion = "eu-central-1" + TypesAwsRegionEuCentral2 TypesAwsRegion = "eu-central-2" + TypesAwsRegionEuNorth1 TypesAwsRegion = "eu-north-1" + TypesAwsRegionEuSouth1 TypesAwsRegion = "eu-south-1" + TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2" + TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" + TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" + TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" + TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" + TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" + TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" + TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" + TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" + TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" + TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" + TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" + TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" +) + type SpecDistributionModulesDrVeleroEks struct { - // The name of the velero bucket + // The name of the bucket for Velero. BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` // IamRoleArn corresponds to the JSON schema field "iamRoleArn". 
IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` - // The region where the velero bucket is located + // The region where the bucket for Velero will be located. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` } @@ -688,14 +962,47 @@ type SpecDistributionModulesIngressCertManagerClusterIssuer struct { type SpecDistributionModulesIngressCertManagerClusterIssuerType string +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "dns01", + "http01", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + return nil +} + const ( SpecDistributionModulesIngressCertManagerClusterIssuerTypeDns01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "dns01" SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01" ) -type SpecDistributionModulesIngressClusterIssuerRoute53 struct { - // HostedZoneId corresponds to the JSON schema field "hostedZoneId". - HostedZoneId string `json:"hostedZoneId" yaml:"hostedZoneId" mapstructure:"hostedZoneId"` +// Configuration for the cert-manager's ACME clusterIssuer used to request +// certificates from Let's Encrypt. +type SpecDistributionModulesIngressCertManagerClusterIssuer struct { + // The email address to use during the certificate issuing process. 
+ Email string `json:"email" yaml:"email" mapstructure:"email"` + + // Name of the clusterIssuer. + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Route53 corresponds to the JSON schema field "route53". + Route53 SpecDistributionModulesIngressClusterIssuerRoute53 `json:"route53" yaml:"route53" mapstructure:"route53"` // IamRoleArn corresponds to the JSON schema field "iamRoleArn". IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` @@ -715,25 +1022,85 @@ type SpecDistributionModulesIngressDNS struct { Public *SpecDistributionModulesIngressDNSPublic `json:"public,omitempty" yaml:"public,omitempty" mapstructure:"public,omitempty"` } -type SpecDistributionModulesIngressDNSPrivate struct { - // If true, the private hosted zone will be created - Create bool `json:"create" yaml:"create" mapstructure:"create"` - - // The name of the private hosted zone - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // VpcId corresponds to the JSON schema field "vpcId". - VpcId string `json:"vpcId" yaml:"vpcId" mapstructure:"vpcId"` -} - -type SpecDistributionModulesIngressDNSPublic struct { - // If true, the public hosted zone will be created - Create bool `json:"create" yaml:"create" mapstructure:"create"` - - // The name of the public hosted zone - Name string `json:"name" yaml:"name" mapstructure:"name"` -} - +type SpecDistributionModulesIngressDNSPrivate struct { + // By default, a Terraform data source will be used to get the private DNS zone. + // Set to `true` to create the private zone instead. + Create bool `json:"create" yaml:"create" mapstructure:"create"` + + // The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`. + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // VpcId corresponds to the JSON schema field "vpcId". + VpcId string `json:"vpcId" yaml:"vpcId" mapstructure:"vpcId"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") + } + if v, ok := raw["vpcId"]; !ok || v == nil { + return fmt.Errorf("field vpcId in SpecDistributionModulesIngressDNSPrivate: required") + } + type Plain SpecDistributionModulesIngressDNSPrivate + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressDNSPrivate(plain) + return nil +} + +type SpecDistributionModulesIngressDNSPublic struct { + // By default, a Terraform data source will be used to get the public DNS zone. + // Set to `true` to create the public zone instead. + Create bool `json:"create" yaml:"create" mapstructure:"create"` + + // The name of the public hosted zone. + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + } + type Plain SpecDistributionModulesIngressDNSPublic + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressDNSPublic(plain) + return nil +} + +// DNS definition, used in conjunction with `externalDNS` package to automate DNS +// management and certificates emission. +type SpecDistributionModulesIngressDNS struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Private corresponds to the JSON schema field "private". + Private *SpecDistributionModulesIngressDNSPrivate `json:"private,omitempty" yaml:"private,omitempty" mapstructure:"private,omitempty"` + + // Public corresponds to the JSON schema field "public". + Public *SpecDistributionModulesIngressDNSPublic `json:"public,omitempty" yaml:"public,omitempty" mapstructure:"public,omitempty"` +} + type SpecDistributionModulesIngressExternalDNS struct { // PrivateIamRoleArn corresponds to the JSON schema field "privateIamRoleArn". PrivateIamRoleArn TypesAwsArn `json:"privateIamRoleArn" yaml:"privateIamRoleArn" mapstructure:"privateIamRoleArn"` @@ -742,6 +1109,27 @@ type SpecDistributionModulesIngressExternalDNS struct { PublicIamRoleArn TypesAwsArn `json:"publicIamRoleArn" yaml:"publicIamRoleArn" mapstructure:"publicIamRoleArn"` } +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressExternalDNS) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["privateIamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field privateIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") + } + if v, ok := raw["publicIamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field publicIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") + } + type Plain SpecDistributionModulesIngressExternalDNS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressExternalDNS(plain) + return nil +} + type SpecDistributionModulesIngressForecastle struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -772,26 +1160,105 @@ type SpecDistributionModulesIngressNginxTLSProvider string const ( SpecDistributionModulesIngressNginxTLSProviderCertManager SpecDistributionModulesIngressNginxTLSProvider = "certManager" - SpecDistributionModulesIngressNginxTLSProviderNone SpecDistributionModulesIngressNginxTLSProvider = "none" SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret" + SpecDistributionModulesIngressNginxTLSProviderNone SpecDistributionModulesIngressNginxTLSProvider = "none" ) +// Kubernetes TLS secret for the ingresses TLS certificate. type SpecDistributionModulesIngressNginxTLSSecret struct { - // Ca corresponds to the JSON schema field "ca". + // The Certificate Authority certificate file's content. You can use the + // `"{file://}"` notation to get the content from a file. 
Ca string `json:"ca" yaml:"ca" mapstructure:"ca"` - // The certificate file content or you can use the file notation to get the - // content from a file + // The certificate file's content. You can use the `"{file://}"` notation to + // get the content from a file. Cert string `json:"cert" yaml:"cert" mapstructure:"cert"` - // Key corresponds to the JSON schema field "key". + // The signing key file's content. You can use the `"{file://}"` notation to + // get the content from a file. Key string `json:"key" yaml:"key" mapstructure:"key"` } +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + return nil +} + +type SpecDistributionModulesIngressNginxTLS struct { + // The provider of the TLS certificates for the ingresses, one of: `none`, + // `certManager`, or `secret`. + Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"` + + // Secret corresponds to the JSON schema field "secret". + Secret *SpecDistributionModulesIngressNginxTLSSecret `json:"secret,omitempty" yaml:"secret,omitempty" mapstructure:"secret,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + } + type Plain SpecDistributionModulesIngressNginxTLS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLS(plain) + return nil +} + type SpecDistributionModulesIngressNginxType string +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) + return nil +} + const ( - SpecDistributionModulesIngressNginxTypeDual SpecDistributionModulesIngressNginxType = "dual" SpecDistributionModulesIngressNginxTypeNone SpecDistributionModulesIngressNginxType = "none" SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" ) @@ -816,20 +1283,37 @@ type SpecDistributionModulesLogging struct { // Cerebro corresponds to the JSON schema field "cerebro". Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` - // CustomOutputs corresponds to the JSON schema field "customOutputs". 
- CustomOutputs *SpecDistributionModulesLoggingCustomOutputs `json:"customOutputs,omitempty" yaml:"customOutputs,omitempty" mapstructure:"customOutputs,omitempty"` + // Set to override the node selector used to place the pods of the Ingress module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // Loki corresponds to the JSON schema field "loki". - Loki *SpecDistributionModulesLoggingLoki `json:"loki,omitempty" yaml:"loki,omitempty" mapstructure:"loki,omitempty"` + // Set to override the tolerations that will be added to the pods of the Ingress + // module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesLoggingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` +type SpecDistributionModulesIngress struct { + // The base domain used for all the KFD ingresses. If in the nginx `dual` + // configuration type, this value should be the same as the + // `.spec.distribution.modules.ingress.dns.private.name` zone. + BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` - // Opensearch corresponds to the JSON schema field "opensearch". - Opensearch *SpecDistributionModulesLoggingOpensearch `json:"opensearch,omitempty" yaml:"opensearch,omitempty" mapstructure:"opensearch,omitempty"` + // Configuration for the cert-manager package. Required even if + // `ingress.nginx.type` is `none`, cert-manager is used for managing other + // certificates in the cluster besides the TLS termination certificates for the + // ingresses. + CertManager SpecDistributionModulesIngressCertManager `json:"certManager" yaml:"certManager" mapstructure:"certManager"` - // Operator corresponds to the JSON schema field "operator". 
- Operator *SpecDistributionModulesLoggingOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + // Dns corresponds to the JSON schema field "dns". + Dns *SpecDistributionModulesIngressDNS `json:"dns,omitempty" yaml:"dns,omitempty" mapstructure:"dns,omitempty"` + + // ExternalDns corresponds to the JSON schema field "externalDns". + ExternalDns SpecDistributionModulesIngressExternalDNS `json:"externalDns" yaml:"externalDns" mapstructure:"externalDns"` + + // Forecastle corresponds to the JSON schema field "forecastle". + Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` + + // Configurations for the Ingress nginx controller package. + Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -844,61 +1328,62 @@ type SpecDistributionModulesLogging struct { Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` } +// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. type SpecDistributionModulesLoggingCerebro struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -// when using the customOutputs logging type, you need to manually specify the spec -// of the several Output and ClusterOutputs that the Logging Operator expects to -// forward the logs collected by the pre-defined flows. +// When using the `customOutputs` logging type, you need to manually specify the +// spec of the several `Output` and `ClusterOutputs` that the Logging Operator +// expects to forward the logs collected by the pre-defined flows. 
type SpecDistributionModulesLoggingCustomOutputs struct { - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `audit` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `errors` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `events` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Events string `json:"events" yaml:"events" mapstructure:"events"` - // This value defines where the output from Flow will be sent. 
Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `infra` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. 
It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdEtcd` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` } @@ -928,24 +1413,25 @@ type SpecDistributionModulesLoggingLoki struct { type SpecDistributionModulesLoggingLokiBackend string const ( - SpecDistributionModulesLoggingLokiBackendExternalEndpoint SpecDistributionModulesLoggingLokiBackend = "externalEndpoint" SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" + SpecDistributionModulesLoggingLokiBackendExternalEndpoint SpecDistributionModulesLoggingLokiBackend = "externalEndpoint" ) +// Configuration for Loki's external storage backend. type SpecDistributionModulesLoggingLokiExternalEndpoint struct { - // The access key id of the loki external endpoint + // The access key ID (username) for the external S3-compatible bucket. 
AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the loki external endpoint + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the loki external endpoint + // External S3-compatible endpoint for Loki's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the loki external endpoint will be insecure + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the loki external endpoint + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } @@ -956,7 +1442,7 @@ type SpecDistributionModulesLoggingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The PVC size for each minio disk, 6 disks total + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } @@ -975,10 +1461,11 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the opensearch pods + // The storage size for the OpenSearch volumes. 
StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` - // The type of the opensearch, must be ***single*** or ***triple*** + // The type of OpenSearch deployment. One of: `single` for a single replica or + // `triple` for an HA 3-replicas deployment. Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` } @@ -996,11 +1483,19 @@ type SpecDistributionModulesLoggingOperator struct { type SpecDistributionModulesLoggingType string +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} + const ( - SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" - SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki" - SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" - SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" + SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" + SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" + SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" + SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki" + SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" ) // configuration for the Monitoring module components @@ -1008,20 +1503,20 @@ type SpecDistributionModulesMonitoring struct { // Alertmanager corresponds to the JSON schema field "alertmanager". Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` - // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". 
- BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` + // CustomOutputs corresponds to the JSON schema field "customOutputs". + CustomOutputs *SpecDistributionModulesLoggingCustomOutputs `json:"customOutputs,omitempty" yaml:"customOutputs,omitempty" mapstructure:"customOutputs,omitempty"` - // Grafana corresponds to the JSON schema field "grafana". - Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` + // Loki corresponds to the JSON schema field "loki". + Loki *SpecDistributionModulesLoggingLoki `json:"loki,omitempty" yaml:"loki,omitempty" mapstructure:"loki,omitempty"` - // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". - KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesLoggingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - // Mimir corresponds to the JSON schema field "mimir". - Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` + // Opensearch corresponds to the JSON schema field "opensearch". + Opensearch *SpecDistributionModulesLoggingOpensearch `json:"opensearch,omitempty" yaml:"opensearch,omitempty" mapstructure:"opensearch,omitempty"` - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + // Operator corresponds to the JSON schema field "operator". 
+ Operator *SpecDistributionModulesLoggingOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1055,14 +1550,15 @@ type SpecDistributionModulesMonitoring struct { } type SpecDistributionModulesMonitoringAlertManager struct { - // The webhook url to send deadman switch monitoring, for example to use with - // healthchecks.io + // The webhook URL to send dead man's switch monitoring, for example to use with + // healthchecks.io. DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` - // If true, the default rules will be installed + // Set to false to avoid installing the Prometheus rules (alerts) included with + // the distribution. InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` - // The slack webhook url to send alerts + // The Slack webhook URL where to send the infrastructural and workload alerts to. SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` } @@ -1118,24 +1614,25 @@ type SpecDistributionModulesMonitoringMimir struct { type SpecDistributionModulesMonitoringMimirBackend string const ( - SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" + SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" ) +// Configuration for Mimir's external storage backend. 
type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { - // The access key id of the external mimir backend + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the external mimir backend + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the external mimir backend + // External S3-compatible endpoint for Mimir's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the external mimir backend will not use tls + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the external mimir backend + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } @@ -1151,13 +1648,27 @@ type SpecDistributionModulesMonitoringMinio struct { } type SpecDistributionModulesMonitoringMinioRootUser struct { - // The password for the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username for the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } +// Configuration for Monitoring's MinIO deployment. +type SpecDistributionModulesMonitoringMinio struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} + type SpecDistributionModulesMonitoringPrometheus struct { // Set this option to ship the collected metrics to a remote Prometheus receiver. // @@ -1171,16 +1682,18 @@ type SpecDistributionModulesMonitoringPrometheus struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The retention size for the k8s Prometheus instance. + // The retention size for the `k8s` Prometheus instance. RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - // The retention time for the k8s Prometheus instance. + // The retention time for the `k8s` Prometheus instance. RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - // The storage size for the k8s Prometheus instance. + // The storage size for the `k8s` Prometheus instance. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } +type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} + type SpecDistributionModulesMonitoringPrometheusAgent struct { // Set this option to ship the collected metrics to a remote Prometheus receiver. 
// @@ -1195,33 +1708,120 @@ type SpecDistributionModulesMonitoringPrometheusAgent struct { Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` } -type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} +type SpecDistributionModulesMonitoringType string -type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", +} -type SpecDistributionModulesMonitoringType string +const TypesAwsRegionApEast1 TypesAwsRegion = "ap-east-1" + +// UnmarshalJSON implements json.Unmarshaler. +func (j *Metadata) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in Metadata: required") + } + type Plain Metadata + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if len(plain.Name) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "name", 1) + } + if len(plain.Name) > 56 { + return fmt.Errorf("field %s length: must be <= %d", "name", 56) + } + *j = Metadata(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesNetworkingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) + } + *j = SpecDistributionModulesNetworkingType(v) + return nil +} const ( - SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" - SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" - SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" + SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" ) -type SpecDistributionModulesMonitoringX509Exporter struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +type SpecDistributionModulesMonitoringX509Exporter struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesNetworking struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Prometheus corresponds to the JSON schema field "prometheus". 
+ Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"`
+
+ // PrometheusAgent corresponds to the JSON schema field "prometheusAgent".
+ PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"`
+
+ // The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or
+ // `mimir`.
+ //
+ // - `none`: will disable the whole monitoring stack.
+ // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus
+ // instance, Alertmanager, a set of alert rules, exporters needed to monitor all
+ // the components of the cluster, Grafana and a series of dashboards to view the
+ // collected metrics, and more.
+ // - `prometheusAgent`: will install Prometheus Operator, an instance of Prometheus
+ // in Agent mode (no alerting, no queries, no storage), and all the exporters
+ // needed to get metrics for the status of the cluster and the workloads. Useful
+ // when having a centralized (remote) Prometheus where to ship the metrics and not
+ // storing them locally in the cluster.
+ // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir
+ // that allows for longer retention of metrics and the usage of Object Storage.
+ //
+ // Default is `prometheus`.
+ Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"`
+
+ // X509Exporter corresponds to the JSON schema field "x509Exporter".
+ X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"`
}

-type SpecDistributionModulesNetworking struct {
- // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // TigeraOperator corresponds to the JSON schema field "tigeraOperator". - TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` - - // Type corresponds to the JSON schema field "type". - Type *SpecDistributionModulesNetworkingType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + } + type Plain SpecDistributionModulesMonitoring + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesMonitoring(plain) + return nil } type SpecDistributionModulesNetworkingTigeraOperator struct { @@ -1253,49 +1853,143 @@ type SpecDistributionModulesPolicyGatekeeper struct { // enforce the constraints on them. AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - // The enforcement action to use for the gatekeeper module + // The default enforcement action to use for the included constraints. `deny` will + // block the admission when violations to the policies are found, `warn` will show + // a message to the user but will admit the violating requests and `dryrun` won't + // give any feedback to the user but it will log the violations. 
EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"`

- // If true, the default policies will be installed
+ // Set to `false` to avoid installing the default Gatekeeper policies (constraint
+ // templates and constraints) included with the distribution.
 InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`

 // Overrides corresponds to the JSON schema field "overrides".
 Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}

-type SpecDistributionModulesPolicyGatekeeperEnforcementAction string
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["enforcementAction"]; !ok || v == nil {
+ return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required")
+ }
+ if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
+ return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required")
+ }
+ type Plain SpecDistributionModulesPolicyGatekeeper
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesPolicyGatekeeper(plain)
+ return nil
+}
+
+type SpecDistributionModulesPolicyKyvernoValidationFailureAction string
+
+var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{
+ "Audit",
+ "Enforce",
+}
+
+// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v)
+ }
+ *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v)
+ return nil
+}

const (
- SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny"
- SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun"
- SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn"
+ SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit"
+ SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce"
)

+// Configuration for the Kyverno package.
type SpecDistributionModulesPolicyKyverno struct {
// This parameter adds namespaces to Kyverno's exemption list, so it will not
- // enforce the constraints on them.
+ // enforce the policies on them.
AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`

- // If true, the default policies will be installed
+ // Set to `false` to avoid installing the default Kyverno policies included with
+ // the distribution. 
InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The validation failure action to use for the kyverno module + // The validation failure action to use for the policies, `Enforce` will block + // when a request does not comply with the policies and `Audit` will not block but + // log when a request does not comply with the policies. ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` } -type SpecDistributionModulesPolicyKyvernoValidationFailureAction string - -const ( - SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" - SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" -) +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") + } + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + } + type Plain SpecDistributionModulesPolicyKyverno + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyKyverno(plain) + return nil +} type SpecDistributionModulesPolicyType string +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + } + *j = SpecDistributionModulesPolicyType(v) + return nil +} + const ( + SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" @@ -1308,13 +2002,40 @@ type SpecDistributionModulesTracing struct { // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // Tempo corresponds to the JSON schema field "tempo". - Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` + // The type of policy enforcement to use, either `none`, `gatekeeper` or + // `kyverno`. + // + // Default is `none`. + Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + } + type Plain SpecDistributionModulesPolicy + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicy(plain) + return nil +} + +type SpecDistributionModulesTracingMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` // The type of tracing to use, either ***none*** or ***tempo*** Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for Tracing's MinIO deployment. type SpecDistributionModulesTracingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1322,7 +2043,7 @@ type SpecDistributionModulesTracingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". 
RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The storage size for the minio pods + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } @@ -1351,24 +2072,25 @@ type SpecDistributionModulesTracingTempo struct { type SpecDistributionModulesTracingTempoBackend string const ( - SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" + SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" ) +// Configuration for Tempo's external storage backend. type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key id of the external tempo backend + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the external tempo backend + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the external tempo backend + // External S3-compatible endpoint for Tempo's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the external tempo backend will not use tls + // If true, will use HTTP as protocol instead of HTTPS. 
Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the external tempo backend + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } @@ -1486,8 +2208,8 @@ type SpecKubernetes struct { // pools unless overridden by a specific node pool. NodePoolGlobalAmiType *SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType,omitempty" yaml:"nodePoolGlobalAmiType,omitempty" mapstructure:"nodePoolGlobalAmiType,omitempty"` - // NodePools corresponds to the JSON schema field "nodePools". - NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"` + // Ingress corresponds to the JSON schema field "ingress". + Ingress SpecDistributionModulesIngress `json:"ingress" yaml:"ingress" mapstructure:"ingress"` // Either `launch_configurations`, `launch_templates` or `both`. For new clusters // use `launch_templates`, for existing cluster you'll need to migrate from @@ -1615,9 +2337,9 @@ type SpecKubernetesNodePool struct { Type SpecKubernetesNodePoolType `json:"type" yaml:"type" mapstructure:"type"` } -type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { - // CidrBlocks corresponds to the JSON schema field "cidrBlocks". - CidrBlocks []TypesCidr `json:"cidrBlocks" yaml:"cidrBlocks" mapstructure:"cidrBlocks"` +type SpecInfrastructureVpcNetwork struct { + // The network CIDR for the VPC that will be created + Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"` // Name corresponds to the JSON schema field "name". Name string `json:"name" yaml:"name" mapstructure:"name"` @@ -1814,13 +2536,11 @@ type SpecPlugins struct { // Helm corresponds to the JSON schema field "helm". 
Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` - // Kustomize corresponds to the JSON schema field "kustomize". - Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` -} + // The username of the account to create in the bastion's operating system. + OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"` -type SpecPluginsHelm struct { - // Releases corresponds to the JSON schema field "releases". - Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` + // The port where each OpenVPN server will listen for connections. + Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` // Repositories corresponds to the JSON schema field "repositories". Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` @@ -1843,40 +2563,30 @@ type SpecPluginsHelmReleases []struct { // Set corresponds to the JSON schema field "set". 
Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` - // The values of the release - Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` - - // The version of the release - Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` -} - -type SpecPluginsHelmReleasesElemSetElem struct { - // The name of the set - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The value of the set - Value string `json:"value" yaml:"value" mapstructure:"value"` -} - -type SpecPluginsHelmRepositories []struct { - // The name of the repository - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The url of the repository - Url string `json:"url" yaml:"url" mapstructure:"url"` -} - -type SpecPluginsKustomize []struct { - // The folder of the kustomize plugin - Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` - - // The name of the kustomize plugin - Name string `json:"name" yaml:"name" mapstructure:"name"` + // The network CIDR that will be used to assign IP addresses to the VPN clients + // when connected. + VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"` } -type SpecToolsConfiguration struct { - // Terraform corresponds to the JSON schema field "terraform". - Terraform SpecToolsConfigurationTerraform `json:"terraform" yaml:"terraform" mapstructure:"terraform"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ssh"]; !ok || v == nil { + return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") + } + if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { + return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + } + type Plain SpecInfrastructureVpn + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpn(plain) + return nil } type SpecToolsConfigurationTerraform struct { @@ -2122,6 +2832,20 @@ func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) err return nil } +const ( + SpecKubernetesNodePoolGlobalAmiTypeAlinux2 SpecKubernetesNodePoolGlobalAmiType = "alinux2" + SpecKubernetesNodePoolGlobalAmiTypeAlinux2023 SpecKubernetesNodePoolGlobalAmiType = "alinux2023" +) + +// Port range for the Firewall Rule. +type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { + // From corresponds to the JSON schema field "from". + From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"` + + // To corresponds to the JSON schema field "to". + To TypesTcpPort `json:"to" yaml:"to" mapstructure:"to"` +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2143,6 +2867,17 @@ func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error return nil } +type TypesAwsIpProtocol string + +type TypesAwsTags map[string]string + +type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType string + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ + "ingress", + "egress", +} + // UnmarshalJSON implements json.Unmarshaler. 
func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { var v string @@ -2163,6 +2898,32 @@ func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { return nil } +const ( + SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress" + SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "egress" +) + +type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { + // CidrBlocks corresponds to the JSON schema field "cidrBlocks". + CidrBlocks []TypesCidr `json:"cidrBlocks" yaml:"cidrBlocks" mapstructure:"cidrBlocks"` + + // Name corresponds to the JSON schema field "name". + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Ports corresponds to the JSON schema field "ports". + Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` + + // Protocol corresponds to the JSON schema field "protocol". + Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + + // Additional AWS tags for the Firewall rule. + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. + Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"` +} + // UnmarshalJSON implements json.Unmarshaler. 
func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { var raw map[string]interface{} diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index 3db9f6e1d..e44fe614d 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -10,7 +10,7 @@ import ( "github.com/sighupio/go-jsonschema/pkg/types" ) -// A Fury Cluster deployed through AWS's Elastic Kubernetes Service +// A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS). type EksclusterKfdV1Alpha2 struct { // ApiVersion corresponds to the JSON schema field "apiVersion". ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` @@ -30,7 +30,8 @@ type EksclusterKfdV1Alpha2Kind string const EksclusterKfdV1Alpha2KindEKSCluster EksclusterKfdV1Alpha2Kind = "EKSCluster" type Metadata struct { - // Name corresponds to the JSON schema field "name". + // The name of the cluster. It will also be used as a prefix for all the other + // resources created. Name string `json:"name" yaml:"name" mapstructure:"name"` } @@ -38,7 +39,9 @@ type Spec struct { // Distribution corresponds to the JSON schema field "distribution". Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` - // DistributionVersion corresponds to the JSON schema field "distributionVersion". + // Defines which KFD version will be installed and, in consequence, the Kubernetes + // version used to create the cluster. It supports git tags and branches. Example: + // `v1.30.1`. DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` // Infrastructure corresponds to the JSON schema field "infrastructure". @@ -50,14 +53,15 @@ type Spec struct { // Plugins corresponds to the JSON schema field "plugins". 
Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` - // Region corresponds to the JSON schema field "region". + // Defines in which AWS region the cluster and all the related resources will be + // created. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` // This map defines which will be the common tags that will be added to all the // resources created on AWS. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // ToolsConfiguration corresponds to the JSON schema field "toolsConfiguration". + // Configuration for tools used by furyctl, like Terraform. ToolsConfiguration SpecToolsConfiguration `json:"toolsConfiguration" yaml:"toolsConfiguration" mapstructure:"toolsConfiguration"` } @@ -72,29 +76,35 @@ type SpecDistribution struct { Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` } +// Common configuration for all the distribution modules. type SpecDistributionCommon struct { - // The node selector to use to place the pods for all the KFD modules + // The node selector to use to place the pods for all the KFD modules. Follows + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` // URL of the registry where to pull images from for the Distribution phase. - // (Default is registry.sighup.io/fury). - // - // NOTE: If plugins are pulling from the default registry, the registry will be - // replaced for these plugins too. + // (Default is `registry.sighup.io/fury`). 
Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - // The relative path to the vendor directory, does not need to be changed + // The relative path to the vendor directory, does not need to be changed. RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` - // The tolerations that will be added to the pods for all the KFD modules + // An array with the tolerations that will be added to the pods for all the KFD + // modules. Follows Kubernetes tolerations format. Example: + // + // ```yaml + // - effect: NoSchedule + // key: node.kubernetes.io/role + // value: infra + // ``` Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionCommonProvider struct { - // The type of the provider, must be EKS if specified + // The provider type. Don't set. FOR INTERNAL USE ONLY. Type string `json:"type" yaml:"type" mapstructure:"type"` } @@ -296,8 +306,11 @@ type SpecDistributionModules struct { Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` } +// Configuration for the Auth module. type SpecDistributionModulesAuth struct { - // The base domain for the auth module + // Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, + // Dex). Notice that when nginx type is dual, these will use the `external` + // ingress class. BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` // Dex corresponds to the JSON schema field "dex". @@ -313,11 +326,25 @@ type SpecDistributionModulesAuth struct { Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"` } +// Configuration for the Dex package. 
 type SpecDistributionModulesAuthDex struct {
-	// The additional static clients for dex
+	// Additional static clients definitions that will be added to the default clients
+	// included with the distribution in Dex's configuration. Example:
+	//
+	// ```yaml
+	// additionalStaticClients:
+	//   - id: my-custom-client
+	//     name: "A custom additional static client"
+	//     redirectURIs:
+	//       - "https://myapp.tld/redirect"
+	//       - "https://alias.tld/oidc-callback"
+	//     secret: supersecretpassword
+	// ```
+	// Reference: https://dexidp.io/docs/connectors/local/
 	AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"`

-	// The connectors for dex
+	// A list with each item defining a Dex connector. Follows Dex connectors
+	// configuration format: https://dexidp.io/docs/connectors/
 	Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"`

 	// Expiry corresponds to the JSON schema field "expiry".
@@ -335,25 +362,29 @@ type SpecDistributionModulesAuthDexExpiry struct {
 	SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"`
 }

+// Override the common configuration with a particular configuration for the Auth
+// module.
 type SpecDistributionModulesAuthOverrides struct {
-	// Ingresses corresponds to the JSON schema field "ingresses".
+	// Override the definition of the Auth module ingresses.
 	Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`

-	// The node selector to use to place the pods for the auth module
+	// Set to override the node selector used to place the pods of the Auth module.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The tolerations that will be added to the pods for the auth module + // Set to override the tolerations that will be added to the pods of the Auth + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionModulesAuthOverridesIngress struct { - // The host of the ingress + // Use this host for the ingress instead of the default one. Host string `json:"host" yaml:"host" mapstructure:"host"` - // The ingress class of the ingress + // Use this ingress class for the ingress instead of the default one. IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } +// Override the definition of the Auth module ingresses. type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress type SpecDistributionModulesAuthPomerium interface{} @@ -478,15 +509,23 @@ type SpecDistributionModulesAuthProvider struct { // BasicAuth corresponds to the JSON schema field "basicAuth". BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` - // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** + // The type of the Auth provider, options are: + // - `none`: will disable authentication in the infrastructural ingresses. + // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) + // and require authentication before accessing them. + // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth + // (username and password) authentication. + // + // Default is `none`. Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the HTTP Basic Auth provider. 
type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth + // The password for logging in with the HTTP basic authentication. Password string `json:"password" yaml:"password" mapstructure:"password"` - // The username for the basic auth + // The username for logging in with the HTTP basic authentication. Username string `json:"username" yaml:"username" mapstructure:"username"` } @@ -537,11 +576,16 @@ type SpecDistributionModulesAwsLoadBalancerController struct { Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Disaster Recovery module. type SpecDistributionModulesDr struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of the DR, must be ***none*** or ***eks*** + // The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the + // module and `eks` will install Velero and use an S3 bucket to store the + // backups. + // + // Default is `none`. Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` // Velero corresponds to the JSON schema field "velero". @@ -567,10 +611,10 @@ type SpecDistributionModulesDrVelero struct { } type SpecDistributionModulesDrVeleroEks struct { - // The name of the velero bucket + // The name of the bucket for Velero. BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` - // The region where the velero bucket is located + // The region where the bucket for Velero will be located. 
Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` } @@ -625,12 +669,15 @@ type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct { } type SpecDistributionModulesIngress struct { - // the base domain used for all the KFD ingresses, if in the nginx dual - // configuration, it should be the same as the - // .spec.distribution.modules.ingress.dns.private.name zone + // The base domain used for all the KFD ingresses. If in the nginx `dual` + // configuration type, this value should be the same as the + // `.spec.distribution.modules.ingress.dns.private.name` zone. BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` - // CertManager corresponds to the JSON schema field "certManager". + // Configuration for the cert-manager package. Required even if + // `ingress.nginx.type` is `none`, cert-manager is used for managing other + // certificates in the cluster besides the TLS termination certificates for the + // ingresses. CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"` // Dns corresponds to the JSON schema field "dns". @@ -639,13 +686,17 @@ type SpecDistributionModulesIngress struct { // Forecastle corresponds to the JSON schema field "forecastle". Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` - // Configurations for the nginx ingress controller module + // Configurations for the Ingress nginx controller package. Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager package. 
Required even if +// `ingress.nginx.type` is `none`, cert-manager is used for managing other +// certificates in the cluster besides the TLS termination certificates for the +// ingresses. type SpecDistributionModulesIngressCertManager struct { // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` @@ -654,17 +705,21 @@ type SpecDistributionModulesIngressCertManager struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager's ACME clusterIssuer used to request +// certificates from Let's Encrypt. type SpecDistributionModulesIngressCertManagerClusterIssuer struct { - // The email of the cluster issuer + // The email address to use during the certificate issuing process. Email string `json:"email" yaml:"email" mapstructure:"email"` - // The name of the cluster issuer + // Name of the clusterIssuer. Name string `json:"name" yaml:"name" mapstructure:"name"` - // The custom solvers configurations + // List of challenge solvers to use instead of the default one for the `http01` + // challenge. Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` - // The type of the cluster issuer, must be ***dns01*** or ***http01*** + // The type of the clusterIssuer, must be `dns01` for using DNS challenge or + // `http01` for using HTTP challenge. 
Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } @@ -675,6 +730,8 @@ const ( SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01" ) +// DNS definition, used in conjunction with `externalDNS` package to automate DNS +// management and certificates emission. type SpecDistributionModulesIngressDNS struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -686,19 +743,23 @@ type SpecDistributionModulesIngressDNS struct { Public *SpecDistributionModulesIngressDNSPublic `json:"public,omitempty" yaml:"public,omitempty" mapstructure:"public,omitempty"` } +// The private DNS zone is used only when `ingress.nginx.type` is `dual`, for +// exposing infrastructural services only in the private DNS zone. type SpecDistributionModulesIngressDNSPrivate struct { - // If true, the private hosted zone will be created + // By default, a Terraform data source will be used to get the private DNS zone. + // Set to `true` to create the private zone instead. Create bool `json:"create" yaml:"create" mapstructure:"create"` - // The name of the private hosted zone + // The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`. Name string `json:"name" yaml:"name" mapstructure:"name"` } type SpecDistributionModulesIngressDNSPublic struct { - // If true, the public hosted zone will be created + // By default, a Terraform data source will be used to get the public DNS zone. + // Set to `true` to create the public zone instead. Create bool `json:"create" yaml:"create" mapstructure:"create"` - // The name of the public hosted zone + // The name of the public hosted zone. 
 	Name string `json:"name" yaml:"name" mapstructure:"name"`
 }

@@ -714,14 +775,24 @@ type SpecDistributionModulesIngressNginx struct {
 	// Tls corresponds to the JSON schema field "tls".
 	Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"`

-	// The type of the nginx ingress controller, must be ***none***, ***single*** or
-	// ***dual***
+	// The type of the Ingress nginx controller, options are:
+	// - `none`: no ingress controller will be installed and no infrastructural
+	// ingresses will be created.
+	// - `single`: a single ingress controller with ingress class `nginx` will be
+	// installed to manage all the ingress resources, infrastructural ingresses will
+	// be created.
+	// - `dual`: two independent ingress controllers will be installed, one for the
+	// `internal` ingress class intended for private ingresses and one for the
+	// `external` ingress class intended for public ingresses. KFD infrastructural
+	// ingresses will use the `internal` ingress class when using the dual type.
+	//
+	// Default is `single`.
 	Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"`
 }

 type SpecDistributionModulesIngressNginxTLS struct {
-	// The provider of the TLS certificate, must be ***none***, ***certManager*** or
-	// ***secret***
+	// The provider of the TLS certificates for the ingresses, one of: `none`,
+	// `certManager`, or `secret`.
 	Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"`

 	// Secret corresponds to the JSON schema field "secret".
@@ -736,15 +807,18 @@ const (
 	SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret"
 )

+// Kubernetes TLS secret for the ingresses TLS certificate.
 type SpecDistributionModulesIngressNginxTLSSecret struct {
-	// Ca corresponds to the JSON schema field "ca".
+	// The Certificate Authority certificate file's content.
You can use the + // `"{file://}"` notation to get the content from a file. Ca string `json:"ca" yaml:"ca" mapstructure:"ca"` - // The certificate file content or you can use the file notation to get the - // content from a file + // The certificate file's content. You can use the `"{file://}"` notation to + // get the content from a file. Cert string `json:"cert" yaml:"cert" mapstructure:"cert"` - // Key corresponds to the JSON schema field "key". + // The signing key file's content. You can use the `"{file://}"` notation to + // get the content from a file. Key string `json:"key" yaml:"key" mapstructure:"key"` } @@ -756,14 +830,17 @@ const ( SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" ) +// Override the common configuration with a particular configuration for the +// Ingress module. type SpecDistributionModulesIngressOverrides struct { // Ingresses corresponds to the JSON schema field "ingresses". Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // The node selector to use to place the pods for the ingress module + // Set to override the node selector used to place the pods of the Ingress module. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The tolerations that will be added to the pods for the ingress module + // Set to override the tolerations that will be added to the pods of the Ingress + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } @@ -772,6 +849,7 @@ type SpecDistributionModulesIngressOverridesIngresses struct { Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` } +// Configuration for the Logging module. 
 type SpecDistributionModulesLogging struct {
 	// Cerebro corresponds to the JSON schema field "cerebro".
 	Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"`
@@ -794,79 +872,87 @@ type SpecDistributionModulesLogging struct {
 	// Overrides corresponds to the JSON schema field "overrides".
 	Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`

-	// selects the logging stack. Choosing none will disable the centralized logging.
-	// Choosing opensearch will deploy and configure the Logging Operator and an
+	// Selects the logging stack. Options are:
+	// - `none`: will disable the centralized logging.
+	// - `opensearch`: will deploy and configure the Logging Operator and an
 	// OpenSearch cluster (can be single or triple for HA) where the logs will be
-	// stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh
-	// for storage. Choosing customOuput the Logging Operator will be deployed and
-	// installed but with no local storage, you will have to create the needed Outputs
-	// and ClusterOutputs to ship the logs to your desired storage.
+	// stored.
+	// - `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.
+	// - `customOutputs`: the Logging Operator will be deployed and installed but with
+	// no local storage, you will have to create the needed Outputs and ClusterOutputs
+	// to ship the logs to your desired storage.
+	//
+	// Default is `opensearch`.
 	Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"`
 }

+// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.
 type SpecDistributionModulesLoggingCerebro struct {
 	// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -// when using the customOutputs logging type, you need to manually specify the spec -// of the several Output and ClusterOutputs that the Logging Operator expects to -// forward the logs collected by the pre-defined flows. +// When using the `customOutputs` logging type, you need to manually specify the +// spec of the several `Output` and `ClusterOutputs` that the Logging Operator +// expects to forward the logs collected by the pre-defined flows. type SpecDistributionModulesLoggingCustomOutputs struct { - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `audit` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `errors` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. 
It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `events` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Events string `json:"events" yaml:"events" mapstructure:"events"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `infra` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. 
+ // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdEtcd` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` } +// Configuration for the Loki package. type SpecDistributionModulesLoggingLoki struct { - // Backend corresponds to the JSON schema field "backend". + // The storage backend type for Loki. 
`minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Loki's external storage backend. ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Resources corresponds to the JSON schema field "resources". @@ -892,23 +978,25 @@ const ( SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" ) +// Configuration for Loki's external storage backend. type SpecDistributionModulesLoggingLokiExternalEndpoint struct { - // The access key id of the loki external endpoint + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the loki external endpoint + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the loki external endpoint + // External S3-compatible endpoint for Loki's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the loki external endpoint will be insecure + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the loki external endpoint + // The secret access key (password) for the external S3-compatible bucket. 
SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for Logging's MinIO deployment. type SpecDistributionModulesLoggingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -916,15 +1004,15 @@ type SpecDistributionModulesLoggingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The PVC size for each minio disk, 6 disks total + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesLoggingMinioRootUser struct { - // The password of the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username of the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -935,10 +1023,11 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the opensearch pods + // The storage size for the OpenSearch volumes. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` - // The type of the opensearch, must be ***single*** or ***triple*** + // The type of OpenSearch deployment. 
One of: `single` for a single replica or + // `triple` for an HA 3-replicas deployment. Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` } @@ -949,6 +1038,7 @@ const ( SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" ) +// Configuration for the Logging Operator. type SpecDistributionModulesLoggingOperator struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -963,7 +1053,7 @@ const ( SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" ) -// configuration for the Monitoring module components +// Configuration for the Monitoring module. type SpecDistributionModulesMonitoring struct { // Alertmanager corresponds to the JSON schema field "alertmanager". Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` @@ -992,12 +1082,12 @@ type SpecDistributionModulesMonitoring struct { // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` - // The type of the monitoring, must be ***none***, ***prometheus***, - // ***prometheusAgent*** or ***mimir***. + // The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or + // `mimir`. // // - `none`: will disable the whole monitoring stack. 
// - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus - // instance, Alertmanager, a set of alert rules, exporters needed to monitor all + // instace, Alertmanager, a set of alert rules, exporters needed to monitor all // the components of the cluster, Grafana and a series of dashboards to view the // collected metrics, and more. // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus @@ -1005,9 +1095,10 @@ type SpecDistributionModulesMonitoring struct { // needed to get metrics for the status of the cluster and the workloads. Useful // when having a centralized (remote) Prometheus where to ship the metrics and not // storing them locally in the cluster. - // - `mimir`: will install the same as the `prometheus` option, and in addition - // Grafana Mimir that allows for longer retention of metrics and the usage of - // Object Storage. + // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir + // that allows for longer retention of metrics and the usage of Object Storage. + // + // Default is `prometheus`. Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` // X509Exporter corresponds to the JSON schema field "x509Exporter". @@ -1015,14 +1106,15 @@ type SpecDistributionModulesMonitoring struct { } type SpecDistributionModulesMonitoringAlertManager struct { - // The webhook url to send deadman switch monitoring, for example to use with - // healthchecks.io + // The webhook URL to send dead man's switch monitoring, for example to use with + // healthchecks.io. DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` - // If true, the default rules will be installed + // Set to false to avoid installing the Prometheus rules (alerts) included with + // the distribution. 
InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` - // The slack webhook url to send alerts + // The Slack webhook URL where to send the infrastructural and workload alerts to. SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` } @@ -1061,17 +1153,22 @@ type SpecDistributionModulesMonitoringKubeStateMetrics struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Mimir package. type SpecDistributionModulesMonitoringMimir struct { - // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** + // The storage backend type for Mimir. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Mimir's external storage backend. ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The retention time for the mimir pods + // The retention time for the metrics stored in Mimir. Default is `30d`. Value must + // match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 + // days.
RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` } @@ -1082,23 +1179,25 @@ const ( SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" ) +// Configuration for Mimir's external storage backend. type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { - // The access key id of the external mimir backend + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the external mimir backend + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the external mimir backend + // External S3-compatible endpoint for Mimir's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the external mimir backend will not use tls + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the external mimir backend + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for Monitoring's MinIO deployment. type SpecDistributionModulesMonitoringMinio struct { // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1106,15 +1205,15 @@ type SpecDistributionModulesMonitoringMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The storage size for the minio pods + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesMonitoringMinioRootUser struct { - // The password for the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username for the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -1131,13 +1230,13 @@ type SpecDistributionModulesMonitoringPrometheus struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The retention size for the k8s Prometheus instance. + // The retention size for the `k8s` Prometheus instance. RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - // The retention time for the k8s Prometheus instance. + // The retention time for the `k8s` Prometheus instance. RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - // The storage size for the k8s Prometheus instance. + // The storage size for the `k8s` Prometheus instance. 
StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } @@ -1173,9 +1272,10 @@ type SpecDistributionModulesMonitoringX509Exporter struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Networking module. type SpecDistributionModulesNetworking struct { // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` // TigeraOperator corresponds to the JSON schema field "tigeraOperator". TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` @@ -1186,6 +1286,7 @@ type SpecDistributionModulesNetworkingTigeraOperator struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Policy module. type SpecDistributionModulesPolicy struct { // Gatekeeper corresponds to the JSON schema field "gatekeeper". Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` @@ -1196,20 +1297,27 @@ type SpecDistributionModulesPolicy struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of security to use, either ***none***, ***gatekeeper*** or - // ***kyverno*** + // The type of policy enforcement to use, either `none`, `gatekeeper` or + // `kyverno`. + // + // Default is `none`. 
Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the Gatekeeper package. type SpecDistributionModulesPolicyGatekeeper struct { // This parameter adds namespaces to Gatekeeper's exemption list, so it will not // enforce the constraints on them. AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - // The enforcement action to use for the gatekeeper module + // The default enforcement action to use for the included constraints. `deny` will + // block the admission when violations to the policies are found, `warn` will show + // a message to the user but will admit the violating requests and `dryrun` won't + // give any feedback to the user but it will log the violations. EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - // If true, the default policies will be installed + // Set to `false` to avoid installing the default Gatekeeper policies (constraints + // templates and constraints) included with the distribution. InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` // Overrides corresponds to the JSON schema field "overrides". @@ -1224,18 +1332,22 @@ const ( SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" ) +// Configuration for the Kyverno package. type SpecDistributionModulesPolicyKyverno struct { // This parameter adds namespaces to Kyverno's exemption list, so it will not - // enforce the constraints on them. + // enforce the policies on them. 
AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - // If true, the default policies will be installed + // Set to `false` to avoid installing the default Kyverno policies included with + // the distribution. InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The validation failure action to use for the kyverno module + // The validation failure action to use for the policies, `Enforce` will block + // when a request does not comply with the policies and `Audit` will not block but + // log when a request does not comply with the policies. ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` } @@ -1254,6 +1366,7 @@ const ( SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" ) +// Configuration for the Tracing module. type SpecDistributionModulesTracing struct { // Minio corresponds to the JSON schema field "minio". Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` @@ -1264,10 +1377,14 @@ type SpecDistributionModulesTracing struct { // Tempo corresponds to the JSON schema field "tempo". Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` - // The type of tracing to use, either ***none*** or ***tempo*** + // The type of tracing to use, either `none` or `tempo`. `none` will disable the + // Tracing module and `tempo` will install a Grafana Tempo deployment.
+ // + // Default is `tempo`. Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for Tracing's MinIO deployment. type SpecDistributionModulesTracingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1275,29 +1392,32 @@ type SpecDistributionModulesTracingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The storage size for the minio pods + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username for the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } +// Configuration for the Tempo package. type SpecDistributionModulesTracingTempo struct { - // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** + // The storage backend type for Tempo. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". 
+ // Configuration for Tempo's external storage backend. ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The retention time for the tempo pods + // The retention time for the traces stored in Tempo. RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` } @@ -1308,20 +1428,21 @@ const ( SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" ) +// Configuration for Tempo's external storage backend. type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key id of the external tempo backend + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the external tempo backend + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the external tempo backend + // External S3-compatible endpoint for Tempo's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the external tempo backend will not use tls + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the external tempo backend + // The secret access key (password) for the external S3-compatible bucket. 
SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } @@ -1333,88 +1454,98 @@ const ( ) type SpecInfrastructure struct { - // This key defines the VPC that will be created in AWS + // Vpc corresponds to the JSON schema field "vpc". Vpc *SpecInfrastructureVpc `json:"vpc,omitempty" yaml:"vpc,omitempty" mapstructure:"vpc,omitempty"` - // This section defines the creation of VPN bastions + // Vpn corresponds to the JSON schema field "vpn". Vpn *SpecInfrastructureVpn `json:"vpn,omitempty" yaml:"vpn,omitempty" mapstructure:"vpn,omitempty"` } +// Configuration for the VPC that will be created to host the EKS cluster and its +// related resources. If you already have a VPC that you want to use, leave this +// section empty and use `.spec.kubernetes.vpcId` instead. type SpecInfrastructureVpc struct { // Network corresponds to the JSON schema field "network". Network SpecInfrastructureVpcNetwork `json:"network" yaml:"network" mapstructure:"network"` } type SpecInfrastructureVpcNetwork struct { - // This is the CIDR of the VPC that will be created + // The network CIDR for the VPC that will be created Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"` // SubnetsCidrs corresponds to the JSON schema field "subnetsCidrs". SubnetsCidrs SpecInfrastructureVpcNetworkSubnetsCidrs `json:"subnetsCidrs" yaml:"subnetsCidrs" mapstructure:"subnetsCidrs"` } +// Network CIDRS configuration for private and public subnets. 
type SpecInfrastructureVpcNetworkSubnetsCidrs struct { - // These are the CIRDs for the private subnets, where the nodes, the pods, and the + // Network CIRDs for the private subnets, where the nodes, the pods, and the // private load balancers will be created Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"` - // These are the CIDRs for the public subnets, where the public load balancers and - // the VPN servers will be created + // Network CIDRs for the public subnets, where the public load balancers and the + // VPN servers will be created Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"` } +// Configuration for the VPN server instances. type SpecInfrastructureVpn struct { - // This value defines the prefix that will be used to create the bucket name where - // the VPN servers will store the states + // This value defines the prefix for the bucket name where the VPN servers will + // store their state (VPN certificates, users). BucketNamePrefix *TypesAwsS3BucketNamePrefix `json:"bucketNamePrefix,omitempty" yaml:"bucketNamePrefix,omitempty" mapstructure:"bucketNamePrefix,omitempty"` - // The dhParamsBits size used for the creation of the .pem file that will be used - // in the dh openvpn server.conf file + // The `dhParamsBits` size used for the creation of the .pem file that will be + // used in the dh openvpn server.conf file. DhParamsBits *int `json:"dhParamsBits,omitempty" yaml:"dhParamsBits,omitempty" mapstructure:"dhParamsBits,omitempty"` - // The size of the disk in GB + // The size of the disk in GB for each VPN server. Example: entering `50` will + // create disks of 50 GB. DiskSize *int `json:"diskSize,omitempty" yaml:"diskSize,omitempty" mapstructure:"diskSize,omitempty"` - // Overrides the default IAM user name for the VPN + // Overrides IAM user name for the VPN. Default is to use the cluster name. 
IamUserNameOverride *TypesAwsIamRoleName `json:"iamUserNameOverride,omitempty" yaml:"iamUserNameOverride,omitempty" mapstructure:"iamUserNameOverride,omitempty"` - // The size of the AWS EC2 instance + // The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 + // nomenclature. Example: `t3.micro`. InstanceType *string `json:"instanceType,omitempty" yaml:"instanceType,omitempty" mapstructure:"instanceType,omitempty"` - // The number of instances to create, 0 to skip the creation + // The number of VPN server instances to create, `0` to skip the creation. Instances *int `json:"instances,omitempty" yaml:"instances,omitempty" mapstructure:"instances,omitempty"` - // The username of the account to create in the bastion's operating system + // The username of the account to create in the bastion's operating system. OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"` - // The port used by the OpenVPN server + // The port where each OpenVPN server will listen for connections. Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` // Ssh corresponds to the JSON schema field "ssh". Ssh SpecInfrastructureVpnSsh `json:"ssh" yaml:"ssh" mapstructure:"ssh"` - // The VPC ID where the VPN servers will be created, required only if - // .spec.infrastructure.vpc is omitted + // The ID of the VPC where the VPN server instances will be created, required only + // if `.spec.infrastructure.vpc` is omitted. VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` - // The CIDR that will be used to assign IP addresses to the VPN clients when - // connected + // The network CIDR that will be used to assign IP addresses to the VPN clients + // when connected.
VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"` } type SpecInfrastructureVpnSsh struct { - // The CIDR enabled in the security group that can access the bastions in SSH + // The network CIDR enabled in the security group to access the VPN servers + // (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source. AllowedFromCidrs []TypesCidr `json:"allowedFromCidrs" yaml:"allowedFromCidrs" mapstructure:"allowedFromCidrs"` - // The github user name list that will be used to get the ssh public key that will - // be added as authorized key to the operatorName user + // List of GitHub usernames from whom get their SSH public key and add as + // authorized keys of the `operatorName` user. GithubUsersName []string `json:"githubUsersName" yaml:"githubUsersName" mapstructure:"githubUsersName"` - // This value defines the public keys that will be added to the bastion's - // operating system NOTES: Not yet implemented + // **NOT IN USE**, use `githubUsersName` instead. This value defines the public + // keys that will be added to the bastion's operating system. PublicKeys []interface{} `json:"publicKeys,omitempty" yaml:"publicKeys,omitempty" mapstructure:"publicKeys,omitempty"` } +// Defines the Kubernetes components configuration and the values needed for the +// `kubernetes` phase of furyctl. type SpecKubernetes struct { // ApiServer corresponds to the JSON schema field "apiServer". ApiServer SpecKubernetesAPIServer `json:"apiServer" yaml:"apiServer" mapstructure:"apiServer"` @@ -1422,17 +1553,20 @@ type SpecKubernetes struct { // AwsAuth corresponds to the JSON schema field "awsAuth". AwsAuth *SpecKubernetesAwsAuth `json:"awsAuth,omitempty" yaml:"awsAuth,omitempty" mapstructure:"awsAuth,omitempty"` - // Overrides the default IAM role name prefix for the EKS cluster + // Overrides the default prefix for the IAM role name of the EKS cluster. 
If not + // set, a name will be generated from the cluster name. ClusterIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"clusterIAMRoleNamePrefixOverride,omitempty" yaml:"clusterIAMRoleNamePrefixOverride,omitempty" mapstructure:"clusterIAMRoleNamePrefixOverride,omitempty"` - // Optional Kubernetes Cluster log retention in days. Defaults to 90 days. - LogRetentionDays *int `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"` + // Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. + // Setting the value to zero (`0`) makes retention last forever. Default is `90` + // days. + LogRetentionDays *SpecKubernetesLogRetentionDays `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"` // Optional list of Kubernetes Cluster log types to enable. Defaults to all types. LogsTypes []SpecKubernetesLogsTypesElem `json:"logsTypes,omitempty" yaml:"logsTypes,omitempty" mapstructure:"logsTypes,omitempty"` - // This key contains the ssh public key that can connect to the nodes via SSH - // using the ec2-user user + // The SSH public key that can connect to the nodes via SSH using the `ec2-user` + // user. Example: the contents of your `~/.ssh/id_rsa.pub` file. NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"` // Global default AMI type used for EKS worker nodes. This will apply to all node @@ -1442,55 +1576,62 @@ type SpecKubernetes struct { // NodePools corresponds to the JSON schema field "nodePools". NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"` - // Either `launch_configurations`, `launch_templates` or `both`. For new clusters - // use `launch_templates`, for existing cluster you'll need to migrate from - // `launch_configurations` to `launch_templates` using `both` as interim.
+ // Accepted values are `launch_configurations`, `launch_templates` or `both`. For + // new clusters use `launch_templates`, for adopting existing cluster you'll need + // to migrate from `launch_configurations` to `launch_templates` using `both` as + // interim. NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"` - // This value defines the CIDR that will be used to assign IP addresses to the - // services + // This value defines the network CIDR that will be used to assign IP addresses to + // Kubernetes services. ServiceIpV4Cidr *TypesCidr `json:"serviceIpV4Cidr,omitempty" yaml:"serviceIpV4Cidr,omitempty" mapstructure:"serviceIpV4Cidr,omitempty"` - // This value defines the subnet IDs where the EKS cluster will be created, - // required only if .spec.infrastructure.vpc is omitted + // Required only if `.spec.infrastructure.vpc` is omitted. This value defines the + // ID of the subnet where the EKS cluster will be created. SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` - // This value defines the VPC ID where the EKS cluster will be created, required - // only if .spec.infrastructure.vpc is omitted + // Required only if `.spec.infrastructure.vpc` is omitted. This value defines the + // ID of the VPC where the EKS cluster and its related resources will be created. VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` - // Overrides the default IAM role name prefix for the EKS workers + // Overrides the default prefix for the IAM role name of the EKS workers. If not + // set, a name will be generated from the cluster name. 
WorkersIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"workersIAMRoleNamePrefixOverride,omitempty" yaml:"workersIAMRoleNamePrefixOverride,omitempty" mapstructure:"workersIAMRoleNamePrefixOverride,omitempty"` } type SpecKubernetesAPIServer struct { - // This value defines if the API server will be accessible only from the private - // subnets + // This value defines if the Kubernetes API server will be accessible from the + // private subnets. Default is `true`. PrivateAccess bool `json:"privateAccess" yaml:"privateAccess" mapstructure:"privateAccess"` - // This value defines the CIDRs that will be allowed to access the API server from - // the private subnets + // The network CIDRs from the private subnets that will be allowed to access the + // Kubernetes API server. PrivateAccessCidrs []TypesCidr `json:"privateAccessCidrs,omitempty" yaml:"privateAccessCidrs,omitempty" mapstructure:"privateAccessCidrs,omitempty"` - // This value defines if the API server will be accessible from the public subnets + // This value defines if the Kubernetes API server will be accessible from the + // public subnets. Default is `false`. PublicAccess bool `json:"publicAccess" yaml:"publicAccess" mapstructure:"publicAccess"` - // This value defines the CIDRs that will be allowed to access the API server from - // the public subnets + // The network CIDRs from the public subnets that will be allowed to access the + // Kubernetes API server. PublicAccessCidrs []TypesCidr `json:"publicAccessCidrs,omitempty" yaml:"publicAccessCidrs,omitempty" mapstructure:"publicAccessCidrs,omitempty"` } +// Optional additional security configuration for EKS IAM via the `aws-auth` +// configmap. +// +// Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html type SpecKubernetesAwsAuth struct { // This optional array defines additional AWS accounts that will be added to the - // aws-auth configmap + // `aws-auth` configmap.
AdditionalAccounts []string `json:"additionalAccounts,omitempty" yaml:"additionalAccounts,omitempty" mapstructure:"additionalAccounts,omitempty"` // This optional array defines additional IAM roles that will be added to the - // aws-auth configmap + // `aws-auth` configmap. Roles []SpecKubernetesAwsAuthRole `json:"roles,omitempty" yaml:"roles,omitempty" mapstructure:"roles,omitempty"` // This optional array defines additional IAM users that will be added to the - // aws-auth configmap + // `aws-auth` configmap. Users []SpecKubernetesAwsAuthUser `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` } @@ -1516,6 +1657,8 @@ type SpecKubernetesAwsAuthUser struct { Username string `json:"username" yaml:"username" mapstructure:"username"` } +type SpecKubernetesLogRetentionDays int + type SpecKubernetesLogsTypesElem string const ( @@ -1526,6 +1669,8 @@ const ( SpecKubernetesLogsTypesElemScheduler SpecKubernetesLogsTypesElem = "scheduler" ) +// Array with all the node pool definitions that will join the cluster. Each item +// is an object. type SpecKubernetesNodePool struct { // AdditionalFirewallRules corresponds to the JSON schema field // "additionalFirewallRules". @@ -1535,16 +1680,17 @@ type SpecKubernetesNodePool struct { Ami *SpecKubernetesNodePoolAmi `json:"ami,omitempty" yaml:"ami,omitempty" mapstructure:"ami,omitempty"` // This optional array defines additional target groups to attach to the instances - // in the node pool + // in the node pool. AttachedTargetGroups []TypesAwsArn `json:"attachedTargetGroups,omitempty" yaml:"attachedTargetGroups,omitempty" mapstructure:"attachedTargetGroups,omitempty"` - // The container runtime to use for the nodes + // The container runtime to use in the nodes of the node pool. Default is + // `containerd`. 
ContainerRuntime *SpecKubernetesNodePoolContainerRuntime `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty" mapstructure:"containerRuntime,omitempty"` // Instance corresponds to the JSON schema field "instance". Instance SpecKubernetesNodePoolInstance `json:"instance" yaml:"instance" mapstructure:"instance"` - // Kubernetes labels that will be added to the nodes + // Kubernetes labels that will be added to the nodes. Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` // The name of the node pool. @@ -1553,13 +1699,13 @@ type SpecKubernetesNodePool struct { // Size corresponds to the JSON schema field "size". Size SpecKubernetesNodePoolSize `json:"size" yaml:"size" mapstructure:"size"` - // This value defines the subnet IDs where the nodes will be created + // Optional list of subnet IDs where to create the nodes. SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` - // AWS tags that will be added to the ASG and EC2 instances + // AWS tags that will be added to the ASG and EC2 instances. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // Kubernetes taints that will be added to the nodes + // Kubernetes taints that will be added to the nodes. Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` // The type of Node Pool, can be `self-managed` for using customization like @@ -1581,10 +1727,11 @@ type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { // Protocol corresponds to the JSON schema field "protocol". Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - // Tags corresponds to the JSON schema field "tags". + // Additional AWS tags for the Firewall rule. 
Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // Type corresponds to the JSON schema field "type". + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1595,6 +1742,7 @@ const ( SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress" ) +// Port range for the Firewall Rule. type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { // From corresponds to the JSON schema field "from". From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"` @@ -1604,22 +1752,23 @@ type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { } type SpecKubernetesNodePoolAdditionalFirewallRuleSelf struct { - // The name of the FW rule + // The name of the Firewall rule. Name string `json:"name" yaml:"name" mapstructure:"name"` // Ports corresponds to the JSON schema field "ports". Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - // The protocol of the FW rule + // The protocol of the Firewall rule. Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - // If true, the source will be the security group itself + // If `true`, the source will be the security group itself. Self bool `json:"self" yaml:"self" mapstructure:"self"` - // The tags of the FW rule + // Additional AWS tags for the Firewall rule. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // The type of the FW rule can be ingress or egress + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. 
Type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1631,22 +1780,23 @@ const ( ) type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct { - // The name of the FW rule + // The name for the additional Firewall rule Security Group. Name string `json:"name" yaml:"name" mapstructure:"name"` // Ports corresponds to the JSON schema field "ports". Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - // The protocol of the FW rule + // The protocol of the Firewall rule. Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - // The source security group ID + // The source security group ID. SourceSecurityGroupId string `json:"sourceSecurityGroupId" yaml:"sourceSecurityGroupId" mapstructure:"sourceSecurityGroupId"` - // The tags of the FW rule + // Additional AWS tags for the Firewall rule. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // The type of the FW rule can be ingress or egress + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. Type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1657,9 +1807,11 @@ const ( SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress" ) +// Optional additional firewall rules that will be attached to the nodes. type SpecKubernetesNodePoolAdditionalFirewallRules struct { - // The CIDR blocks for the FW rule. At the moment the first item of the list will - // be used, others will be ignored. + // The CIDR blocks objects definition for the Firewall rule. Even though it is a + // list, only one item is currently supported. 
See + // https://github.com/sighupio/fury-eks-installer/issues/46 for more details. CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"` // Self corresponds to the JSON schema field "self". @@ -1715,19 +1867,23 @@ const ( ) type SpecKubernetesNodePoolInstance struct { - // MaxPods corresponds to the JSON schema field "maxPods". + // Set the maximum pods per node to a custom value. If not set will use EKS + // default value that depends on the instance type. + // + // Ref: + // https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"` - // If true, the nodes will be created as spot instances + // If `true`, the nodes will be created as spot instances. Default is `false`. Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"` - // The instance type to use for the nodes + // The instance type to use for the nodes. Type string `json:"type" yaml:"type" mapstructure:"type"` - // The size of the disk in GB + // The size of the disk in GB. VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"` - // VolumeType corresponds to the JSON schema field "volumeType". + // Volume type for the instance disk. Default is `gp2`. VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"` } @@ -1741,10 +1897,10 @@ const ( ) type SpecKubernetesNodePoolSize struct { - // The maximum number of nodes in the node pool + // The maximum number of nodes in the node pool. Max int `json:"max" yaml:"max" mapstructure:"max"` - // The minimum number of nodes in the node pool + // The minimum number of nodes in the node pool. 
Min int `json:"min" yaml:"min" mapstructure:"min"` } @@ -1837,24 +1993,26 @@ type SpecToolsConfigurationTerraform struct { State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"` } +// Configuration for storing the Terraform state of the cluster. type SpecToolsConfigurationTerraformState struct { // S3 corresponds to the JSON schema field "s3". S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"` } +// Configuration for the S3 bucket used to store the Terraform state. type SpecToolsConfigurationTerraformStateS3 struct { - // This value defines which bucket will be used to store all the states + // This value defines which bucket will be used to store all the states. BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` // This value defines which folder will be used to store all the states inside the - // bucket + // bucket. KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"` - // This value defines in which region the bucket is located + // This value defines in which region the bucket is located. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` // This value defines if the region of the bucket should be validated or not by - // Terraform, useful when using a bucket in a recently added region + // Terraform, useful when using a bucket in a recently added region. SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"` } @@ -2006,29 +2164,29 @@ var enumValues_SpecDistributionModulesPolicyType = []interface{}{ } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") + type Plain SpecKubernetesNodePoolAdditionalFirewallRules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") + if len(plain.CidrBlocks) > 1 { + return fmt.Errorf("field %s length: must be <= %d", "cidrBlocks", 1) } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") + if plain.Self != nil && len(plain.Self) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "self", 1) } - type Plain SpecDistributionModules - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) } - *j = SpecDistributionModules(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) return nil } @@ -2440,14 +2598,6 @@ var enumValues_SpecDistributionModulesLoggingType = []interface{}{ "customOutputs", } -var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ - "api", - "audit", - "authenticator", - "controllerManager", - "scheduler", -} - // UnmarshalJSON implements json.Unmarshaler. 
func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { var v string @@ -2486,6 +2636,12 @@ func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error return nil } +var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ + "launch_configurations", + "launch_templates", + "both", +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { var v string @@ -3245,22 +3401,22 @@ const ( ) // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesAwsRegion { + for _, expected := range enumValues_SpecKubernetesLogsTypesElem { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) } - *j = TypesAwsRegion(v) + *j = SpecKubernetesLogsTypesElem(v) return nil } @@ -3344,22 +3500,22 @@ type TypesAwsSubnetId string type TypesKubeTaints []string // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { + for _, expected := range enumValues_SpecDistributionModulesLoggingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = SpecDistributionModulesDrType(v) + *j = SpecDistributionModulesLoggingType(v) return nil } @@ -3484,58 +3640,73 @@ type TypesFuryModuleComponentOverridesWithIAMRoleName struct { } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) } - *j = SpecDistributionModulesAuth(plain) + *j = TypesKubeTolerationEffect(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { + return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { + return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - type Plain SpecDistributionModulesAuthProvider + if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { + return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { + return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + } + type Plain SpecDistributionModulesAuthPomeriumSecrets var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProvider(plain) + *j = SpecDistributionModulesAuthPomeriumSecrets(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + if v, ok := raw["userarn"]; !ok || v == nil { + return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + } + type Plain SpecKubernetesAwsAuthUser + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesAuthProviderType(v) + *j = SpecKubernetesAwsAuthUser(plain) return nil } @@ -3594,62 +3765,61 @@ func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesAuthProviderBasicAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + *j = SpecDistributionModulesMonitoringType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") - } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") } - type Plain SpecDistributionModulesAuthOverridesIngress + type Plain SpecDistributionModulesMonitoring var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthOverridesIngress(plain) + *j = SpecDistributionModulesMonitoring(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + if v, ok := raw["privateAccess"]; !ok || v == nil { + return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") + } + if v, ok := raw["publicAccess"]; !ok || v == nil { + return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") } - type Plain SpecDistributionModulesAuthDex + type Plain SpecKubernetesAPIServer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthDex(plain) + *j = SpecKubernetesAPIServer(plain) return nil } @@ -3662,42 +3832,44 @@ type TypesFuryModuleComponentOverrides struct { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) return nil } type TypesAwsS3KeyPrefix string // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + for _, expected := range enumValues_TypesKubeTolerationOperator_1 { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + *j = TypesKubeTolerationOperator_1(v) return nil } @@ -3750,20 +3922,26 @@ func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { + return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") + } + if v, ok := raw["githubUsersName"]; !ok || v == nil { + return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + type Plain SpecInfrastructureVpnSsh var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + } + *j = SpecInfrastructureVpnSsh(plain) return nil } @@ -3806,22 +3984,22 @@ func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) return nil } @@ -3865,62 +4043,44 @@ var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration: required") + if v, ok := raw["cidr"]; !ok || v == nil { + return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration: required") + if v, ok := raw["subnetsCidrs"]; !ok || v == nil { + return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") } - type Plain TypesKubeToleration + type Plain SpecInfrastructureVpcNetwork var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration(plain) + *j = SpecInfrastructureVpcNetwork(plain) return nil } -type TypesKubeToleration struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` - - // The key of the toleration - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". - Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // The value of the toleration - Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` -} - -const ( - TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" - TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" -) - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + } + type Plain SpecDistributionModulesPolicyKyverno + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = TypesKubeTolerationOperator(v) + *j = SpecDistributionModulesPolicyKyverno(plain) return nil } @@ -3929,31 +4089,24 @@ var enumValues_TypesKubeTolerationOperator = []interface{}{ "Equal", } -type TypesKubeTolerationOperator string - -const ( - TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" -) - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - *j = TypesKubeTolerationEffect(v) + type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) return nil } @@ -4139,20 +4292,20 @@ type TypesSshPubKey string type TypesUri string // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionCommonProvider: required") + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") } - type Plain SpecDistributionCommonProvider + type Plain SpecDistributionModulesTracing var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCommonProvider(plain) + *j = SpecDistributionModulesTracing(plain) return nil } diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go index e1a3f89cc..b5b6d4032 100644 --- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go +++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go @@ -10,6 +10,7 @@ import ( "github.com/sighupio/go-jsonschema/pkg/types" ) +// KFD modules deployed on top of an existing Kubernetes cluster. type KfddistributionKfdV1Alpha2 struct { // ApiVersion corresponds to the JSON schema field "apiVersion". ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` @@ -29,7 +30,8 @@ type KfddistributionKfdV1Alpha2Kind string const KfddistributionKfdV1Alpha2KindKFDDistribution KfddistributionKfdV1Alpha2Kind = "KFDDistribution" type Metadata struct { - // Name corresponds to the JSON schema field "name". + // The name of the cluster. It will also be used as a prefix for all the other + // resources created. Name string `json:"name" yaml:"name" mapstructure:"name"` } @@ -37,7 +39,9 @@ type Spec struct { // Distribution corresponds to the JSON schema field "distribution". 
Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` - // DistributionVersion corresponds to the JSON schema field "distributionVersion". + // Defines which KFD version will be installed and, in consequence, the Kubernetes + // version used to create the cluster. It supports git tags and branches. Example: + // `v1.30.1`. DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` // Plugins corresponds to the JSON schema field "plugins". @@ -51,36 +55,45 @@ type SpecDistribution struct { // CustomPatches corresponds to the JSON schema field "customPatches". CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"` - // The kubeconfig file path + // The path to the kubeconfig file. Kubeconfig string `json:"kubeconfig" yaml:"kubeconfig" mapstructure:"kubeconfig"` // Modules corresponds to the JSON schema field "modules". Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` } +// Common configuration for all the distribution modules. type SpecDistributionCommon struct { - // The node selector to use to place the pods for all the KFD modules + // The node selector to use to place the pods for all the KFD modules. Follows + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` // URL of the registry where to pull images from for the Distribution phase. - // (Default is registry.sighup.io/fury). + // (Default is `registry.sighup.io/fury`). 
// // NOTE: If plugins are pulling from the default registry, the registry will be // replaced for the plugin too. Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - // The relative path to the vendor directory, does not need to be changed + // The relative path to the vendor directory, does not need to be changed. RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` - // The tolerations that will be added to the pods for all the KFD modules + // An array with the tolerations that will be added to the pods for all the KFD + // modules. Follows Kubernetes tolerations format. Example: + // + // ```yaml + // - effect: NoSchedule + // key: node.kubernetes.io/role + // value: infra + // ``` Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionCommonProvider struct { - // The type of the provider + // The provider type. Don't set. FOR INTERNAL USE ONLY. Type string `json:"type" yaml:"type" mapstructure:"type"` } @@ -279,8 +292,11 @@ type SpecDistributionModules struct { Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` } +// Configuration for the Auth module. type SpecDistributionModulesAuth struct { - // The base domain for the auth module + // Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, + // Dex). Notice that when nginx type is dual, these will use the `external` + // ingress class. BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` // Dex corresponds to the JSON schema field "dex". 
@@ -296,11 +312,25 @@ type SpecDistributionModulesAuth struct {
 	Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
 }
 
+// Configuration for the Dex package.
 type SpecDistributionModulesAuthDex struct {
-	// The additional static clients for dex
+	// Additional static clients definitions that will be added to the default clients
+	// included with the distribution in Dex's configuration. Example:
+	//
+	// ```yaml
+	// additionalStaticClients:
+	//   - id: my-custom-client
+	//     name: "A custom additional static client"
+	//     redirectURIs:
+	//       - "https://myapp.tld/redirect"
+	//       - "https://alias.tld/oidc-callback"
+	//     secret: supersecretpassword
+	// ```
+	// Reference: https://dexidp.io/docs/connectors/local/
 	AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"`
 
-	// The connectors for dex
+	// A list with each item defining a Dex connector. Follows Dex connectors
+	// configuration format: https://dexidp.io/docs/connectors/
 	Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"`
 
 	// Expiry corresponds to the JSON schema field "expiry".
@@ -318,22 +348,25 @@ type SpecDistributionModulesAuthDexExpiry struct {
 	SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"`
 }
 
+// Override the common configuration with a particular configuration for the Auth
+// module.
 type SpecDistributionModulesAuthOverrides struct {
 	// Ingresses corresponds to the JSON schema field "ingresses".
 	Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
 
-	// The node selector to use to place the pods for the auth module
+	// Set to override the node selector used to place the pods of the Auth module.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The tolerations that will be added to the pods for the auth module + // Set to override the tolerations that will be added to the pods of the Auth + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionModulesAuthOverridesIngress struct { - // The host of the ingress + // Use this host for the ingress instead of the default one. Host string `json:"host" yaml:"host" mapstructure:"host"` - // The ingress class of the ingress + // Use this ingress class for the ingress instead of the default one. IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } @@ -461,15 +494,23 @@ type SpecDistributionModulesAuthProvider struct { // BasicAuth corresponds to the JSON schema field "basicAuth". BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` - // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** + // The type of the Auth provider, options are: + // - `none`: will disable authentication in the infrastructural ingresses. + // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) + // and require authentication before accessing them. + // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth + // (username and password) authentication. + // + // Default is `none`. Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the HTTP Basic Auth provider. type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth + // The password for logging in with the HTTP basic authentication. 
Password string `json:"password" yaml:"password" mapstructure:"password"` - // The username for the basic auth + // The username for logging in with the HTTP basic authentication. Username string `json:"username" yaml:"username" mapstructure:"username"` } @@ -481,11 +522,16 @@ const ( SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" ) +// Configuration for the Disaster Recovery module. type SpecDistributionModulesDr struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of the DR, must be ***none*** or ***on-premises*** + // The type of the Disaster Recovery, must be `none` or `on-premises`. `none` + // disables the module and `on-premises` will install Velero and an optional MinIO + // deployment. + // + // Default is `none`. Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` // Velero corresponds to the JSON schema field "velero". @@ -499,6 +545,7 @@ const ( SpecDistributionModulesDrTypeOnPremises SpecDistributionModulesDrType = "on-premises" ) +// Configuration for the Velero package. type SpecDistributionModulesDrVelero struct { // The storage backend type for Velero. `minio` will use an in-cluster MinIO // deployment for object storage, `externalEndpoint` can be used to point to an @@ -602,24 +649,31 @@ type SpecDistributionModulesDrVeleroSnapshotController struct { } type SpecDistributionModulesIngress struct { - // the base domain used for all the KFD ingresses, if in the nginx dual - // configuration, it should be the same as the - // .spec.distribution.modules.ingress.dns.private.name zone + // The base domain used for all the KFD infrastructural ingresses. If using the + // nginx dual type, this value should be the same as the domain associated with + // the `internal` ingress class. 
BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` - // CertManager corresponds to the JSON schema field "certManager". + // Configuration for the cert-manager package. Required even if + // `ingress.nginx.type` is `none`, cert-manager is used for managing other + // certificates in the cluster besides the TLS termination certificates for the + // ingresses. CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"` // Forecastle corresponds to the JSON schema field "forecastle". Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` - // Configurations for the nginx ingress controller module + // Configurations for the Ingress nginx controller package. Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager package. Required even if +// `ingress.nginx.type` is `none`, cert-manager is used for managing other +// certificates in the cluster besides the TLS termination certificates for the +// ingresses. type SpecDistributionModulesIngressCertManager struct { // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". 
ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` @@ -628,17 +682,21 @@ type SpecDistributionModulesIngressCertManager struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager's ACME clusterIssuer used to request +// certificates from Let's Encrypt. type SpecDistributionModulesIngressCertManagerClusterIssuer struct { - // The email of the cluster issuer + // The email address to use during the certificate issuing process. Email string `json:"email" yaml:"email" mapstructure:"email"` - // The name of the cluster issuer + // Name of the clusterIssuer. Name string `json:"name" yaml:"name" mapstructure:"name"` - // The custom solvers configurations + // List of challenge solvers to use instead of the default one for the `http01` + // challenge. Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` - // The type of the cluster issuer, must be ***http01*** + // The type of the clusterIssuer. Only `http01` challenge is supported for + // KFDDistribution kind. See solvers for arbitrary configurations. Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } @@ -658,14 +716,24 @@ type SpecDistributionModulesIngressNginx struct { // Tls corresponds to the JSON schema field "tls". Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` - // The type of the nginx ingress controller, must be ***none***, ***single*** or - // ***dual*** + // The type of the Ingress nginx controller, options are: + // - `none`: no ingress controller will be installed and no infrastructural + // ingresses will be created. 
+	// - `single`: a single ingress controller with ingress class `nginx` will be
+	// installed to manage all the ingress resources, infrastructural ingresses will
+	// be created.
+	// - `dual`: two independent ingress controllers will be installed, one for the
+	// `internal` ingress class intended for private ingresses and one for the
+	// `external` ingress class intended for public ingresses. KFD infrastructural
+	// ingresses will use the `internal` ingress class when using the dual type.
+	//
+	// Default is `single`.
 	Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"`
 }
 
 type SpecDistributionModulesIngressNginxTLS struct {
-	// The provider of the TLS certificate, must be ***none***, ***certManager*** or
-	// ***secret***
+	// The provider of the TLS certificates for the ingresses, one of: `none`,
+	// `certManager`, or `secret`.
 	Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
 
 	// Secret corresponds to the JSON schema field "secret".
@@ -680,15 +748,18 @@ const (
 	SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret"
 )
 
+// Kubernetes TLS secret for the ingresses TLS certificate.
 type SpecDistributionModulesIngressNginxTLSSecret struct {
-	// Ca corresponds to the JSON schema field "ca".
+	// The Certificate Authority certificate file's content. You can use the
+	// `"{file://<path>}"` notation to get the content from a file.
 	Ca string `json:"ca" yaml:"ca" mapstructure:"ca"`
 
-	// The certificate file content or you can use the file notation to get the
-	// content from a file
+	// The certificate file's content. You can use the `"{file://<path>}"` notation to
+	// get the content from a file.
 	Cert string `json:"cert" yaml:"cert" mapstructure:"cert"`
 
-	// Key corresponds to the JSON schema field "key".
+	// The signing key file's content. You can use the `"{file://<path>}"` notation to
+	// get the content from a file.
Key string `json:"key" yaml:"key" mapstructure:"key"` } @@ -700,14 +771,17 @@ const ( SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" ) +// Override the common configuration with a particular configuration for the +// Ingress module. type SpecDistributionModulesIngressOverrides struct { // Ingresses corresponds to the JSON schema field "ingresses". Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // The node selector to use to place the pods for the ingress module + // Set to override the node selector used to place the pods of the Ingress module. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The tolerations that will be added to the pods for the ingress module + // Set to override the tolerations that will be added to the pods of the Ingress + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } @@ -716,6 +790,7 @@ type SpecDistributionModulesIngressOverridesIngresses struct { Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` } +// Configuration for the Logging module. type SpecDistributionModulesLogging struct { // Cerebro corresponds to the JSON schema field "cerebro". Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` @@ -738,79 +813,87 @@ type SpecDistributionModulesLogging struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // selects the logging stack. Choosing none will disable the centralized logging. 
-	// Choosing opensearch will deploy and configure the Logging Operator and an
+	// Selects the logging stack. Options are:
+	// - `none`: will disable the centralized logging.
+	// - `opensearch`: will deploy and configure the Logging Operator and an
 	// OpenSearch cluster (can be single or triple for HA) where the logs will be
-	// stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh
-	// for storage. Choosing customOuput the Logging Operator will be deployed and
-	// installed but with no local storage, you will have to create the needed Outputs
-	// and ClusterOutputs to ship the logs to your desired storage.
+	// stored.
+	// - `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.
+	// - `customOutputs`: the Logging Operator will be deployed and installed but with
+	// no local storage, you will have to create the needed Outputs and ClusterOutputs
+	// to ship the logs to your desired storage.
+	//
+	// Default is `opensearch`.
 	Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"`
 }
 
+// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.
 type SpecDistributionModulesLoggingCerebro struct {
 	// Overrides corresponds to the JSON schema field "overrides".
 	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
 }
 
-// when using the customOutputs logging type, you need to manually specify the spec
-// of the several Output and ClusterOutputs that the Logging Operator expects to
-// forward the logs collected by the pre-defined flows.
+// When using the `customOutputs` logging type, you need to manually specify the
+// spec of the several `Output` and `ClusterOutputs` that the Logging Operator
+// expects to forward the logs collected by the pre-defined flows.
 type SpecDistributionModulesLoggingCustomOutputs struct {
-	// This value defines where the output from Flow will be sent.
Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `audit` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `errors` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `events` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Events string `json:"events" yaml:"events" mapstructure:"events"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. 
Use the nullout output to discard the - // flow. + // This value defines where the output from the `infra` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. 
+ // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdEtcd` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` } +// Configuration for the Loki package. type SpecDistributionModulesLoggingLoki struct { - // Backend corresponds to the JSON schema field "backend". + // The storage backend type for Loki. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Loki's external storage backend. ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Resources corresponds to the JSON schema field "resources". 
@@ -836,23 +919,25 @@ const ( SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" ) +// Configuration for Loki's external storage backend. type SpecDistributionModulesLoggingLokiExternalEndpoint struct { - // The access key id of the loki external endpoint + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the loki external endpoint + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the loki external endpoint + // External S3-compatible endpoint for Loki's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the loki external endpoint will be insecure + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the loki external endpoint + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for Logging's MinIO deployment. type SpecDistributionModulesLoggingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -860,15 +945,15 @@ type SpecDistributionModulesLoggingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". 
RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The PVC size for each minio disk, 6 disks total + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesLoggingMinioRootUser struct { - // The password of the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username of the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -879,10 +964,11 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the opensearch pods + // The storage size for the OpenSearch volumes. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` - // The type of the opensearch, must be ***single*** or ***triple*** + // The type of OpenSearch deployment. One of: `single` for a single replica or + // `triple` for an HA 3-replicas deployment. Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` } @@ -893,6 +979,7 @@ const ( SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" ) +// Configuration for the Logging Operator. type SpecDistributionModulesLoggingOperator struct { // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1582,6 +1669,13 @@ func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) e return nil } +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { var v string @@ -1669,6 +1763,61 @@ func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b return nil } +type SpecDistributionModulesMonitoringAlertManager struct { + // The webhook URL to send dead man's switch monitoring, for example to use with + // healthchecks.io. + DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` + + // Set to false to avoid installing the Prometheus rules (alerts) included with + // the distribution. + InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` + + // The Slack webhook URL where to send the infrastructural and workload alerts to. + SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` +} + +type SpecDistributionModulesMonitoringBlackboxExporter struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesMonitoringGrafana struct { + // Setting this to true will deploy an additional `grafana-basic-auth` ingress + // protected with Grafana's basic auth instead of SSO. 
Its intended use is as a
+	// temporary ingress for when there are problems with the SSO login flow.
+	//
+	// Notice that by default anonymous access is enabled.
+	BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's
+	// role. Example:
+	//
+	// ```yaml
+	// usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' ||
+	// contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') &&
+	// 'Viewer'"
+	// ```
+	//
+	// More details in [Grafana's
+	// documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping).
+	UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringKubeStateMetrics struct {
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringMimirBackend string
+
+var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{
+	"minio",
+	"externalEndpoint",
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
 func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error {
 	var v string
@@ -2133,22 +2282,22 @@ func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) return nil } @@ -2769,6 +2918,81 @@ type TypesIpAddress string type TypesKubeLabels_1 map[string]string +type TypesKubeTaintsEffect string + +var enumValues_TypesKubeTaintsEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} +type TypesEnvRef string + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeTaintsEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTaintsEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTaintsEffect, v) + } + *j = TypesKubeTaintsEffect(v) + return nil +} + +const ( + TypesKubeTaintsEffectNoSchedule TypesKubeTaintsEffect = "NoSchedule" + TypesKubeTaintsEffectPreferNoSchedule TypesKubeTaintsEffect = "PreferNoSchedule" + TypesKubeTaintsEffectNoExecute TypesKubeTaintsEffect = "NoExecute" +) + +type TypesKubeTaints struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTaintsEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Value corresponds to the JSON schema field "value". + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeTaints) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if v, ok := raw["effect"]; !ok || v == nil {
+		return fmt.Errorf("field effect in TypesKubeTaints: required")
+	}
+	if v, ok := raw["key"]; !ok || v == nil {
+		return fmt.Errorf("field key in TypesKubeTaints: required")
+	}
+	if v, ok := raw["value"]; !ok || v == nil {
+		return fmt.Errorf("field value in TypesKubeTaints: required")
+	}
+	type Plain TypesKubeTaints
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = TypesKubeTaints(plain)
+	return nil
+}
+type TypesFileRef string
+
+// NOTE(review): dropped the re-declarations of TypesIpAddress and
+// TypesKubeLabels_1 that this patch originally added here: both appear to be
+// already declared in this file (see the unchanged context above), so the
+// duplicates would fail to compile ("redeclared in this block"). The legacy
+// `[]string` form of TypesKubeTaints is removed for the same reason.
-type TypesKubeTaints []string
 
 type TypesSemVer string
 
diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go
index 88946d9ca..b3f3b16ed 100644
--- a/pkg/apis/onpremises/v1alpha2/public/schema.go
+++ b/pkg/apis/onpremises/v1alpha2/public/schema.go
@@ -16,6 +16,7 @@ type Metadata struct {
 	Name string `json:"name" yaml:"name" mapstructure:"name"`
 }
 
+// A KFD Cluster deployed on top of a set of existing VMs.
 type OnpremisesKfdV1Alpha2 struct {
 	// ApiVersion corresponds to the JSON schema field "apiVersion".
 	ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"`
@@ -40,7 +41,7 @@ type Spec struct {
 
 	// Defines which KFD version will be installed and, in consequence, the Kubernetes
 	// version used to create the cluster. It supports git tags and branches. Example:
-	// v1.30.1.
+	// `v1.30.1`.
 	DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"`
 
 	// Kubernetes corresponds to the JSON schema field "kubernetes".
@@ -68,7 +69,7 @@ type SpecDistributionCommon struct { NetworkPoliciesEnabled *bool `json:"networkPoliciesEnabled,omitempty" yaml:"networkPoliciesEnabled,omitempty" mapstructure:"networkPoliciesEnabled,omitempty"` // The node selector to use to place the pods for all the KFD modules. Follows - // Kubernetes selector format. Example: `node.kubernetes.io/role: infra` + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". @@ -76,6 +77,9 @@ type SpecDistributionCommon struct { // URL of the registry where to pull images from for the Distribution phase. // (Default is `registry.sighup.io/fury`). + // + // NOTE: If plugins are pulling from the default registry, the registry will be + // replaced for the plugin too. Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` // The relative path to the vendor directory, does not need to be changed. @@ -547,6 +551,8 @@ type SpecDistributionModulesAuthProvider struct { // and require authentication before accessing them. // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth // (username and password) authentication. + // + // Default is `none`. Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } @@ -575,6 +581,8 @@ type SpecDistributionModulesDr struct { // The type of the Disaster Recovery, must be `none` or `on-premises`. `none` // disables the module and `on-premises` will install Velero and an optional MinIO // deployment. + // + // Default is `none`. Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` // Velero corresponds to the JSON schema field "velero". 
@@ -709,7 +717,7 @@ type SpecDistributionModulesIngress struct { // If corresponds to the JSON schema field "if". If interface{} `json:"if,omitempty" yaml:"if,omitempty" mapstructure:"if,omitempty"` - // Configurations for the nginx ingress controller package. + // Configurations for the Ingress nginx controller package. Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". @@ -737,7 +745,7 @@ type SpecDistributionModulesIngressCertManagerClusterIssuer struct { // The email address to use during the certificate issuing process. Email string `json:"email" yaml:"email" mapstructure:"email"` - // Name of the clusterIssuer + // Name of the clusterIssuer. Name string `json:"name" yaml:"name" mapstructure:"name"` // List of challenge solvers to use instead of the default one for the `http01` @@ -765,7 +773,7 @@ type SpecDistributionModulesIngressNginx struct { // Tls corresponds to the JSON schema field "tls". Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` - // The type of the nginx ingress controller, options are: + // The type of the Ingress nginx controller, options are: // - `none`: no ingress controller will be installed and no infrastructural // ingresses will be created. // - `single`: a single ingress controller with ingress class `nginx` will be @@ -775,6 +783,8 @@ type SpecDistributionModulesIngressNginx struct { // `internal` ingress class intended for private ingresses and one for the // `external` ingress class intended for public ingresses. KFD infrastructural // ingresses wil use the `internal` ingress class when using the dual type. + // + // Default is `single`. 
Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` } @@ -824,11 +834,11 @@ type SpecDistributionModulesIngressOverrides struct { // Ingresses corresponds to the JSON schema field "ingresses". Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // Set to override the node selector used to place the pods of the Ingress module + // Set to override the node selector used to place the pods of the Ingress module. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Set to override the tolerations that will be added to the pods of the Ingress - // module + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } @@ -869,10 +879,12 @@ type SpecDistributionModulesLogging struct { // - `customOuputs`: the Logging Operator will be deployed and installed but with // no local storage, you will have to create the needed Outputs and ClusterOutputs // to ship the logs to your desired storage. + // + // Default is `opensearch`. Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` } -// DEPRECATED in latest versions of KFD. +// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. type SpecDistributionModulesLoggingCerebro struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1083,6 +1095,8 @@ type SpecDistributionModulesMonitoring struct { // storing them locally in the cluster. // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir // that allows for longer retention of metrics and the usage of Object Storage. + // + // Default is `prometheus`. 
Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` // X509Exporter corresponds to the JSON schema field "x509Exporter". @@ -1091,7 +1105,7 @@ type SpecDistributionModulesMonitoring struct { type SpecDistributionModulesMonitoringAlertManager struct { // The webhook URL to send dead man's switch monitoring, for example to use with - // healthchecks.io + // healthchecks.io. DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` // Set to false to avoid installing the Prometheus rules (alerts) included with diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index 538188105..5e6c07b26 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -34,6 +34,7 @@ "properties": { "name": { "type": "string", + "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.", "minLength": 1, "maxLength": 56 } @@ -48,17 +49,20 @@ "properties": { "distributionVersion": { "type": "string", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "region": { - "$ref": "#/$defs/Types.AwsRegion" + "$ref": "#/$defs/Types.AwsRegion", + "description": "Defines in which AWS region the cluster and all the related resources will be created." }, "tags": { "$ref": "#/$defs/Types.AwsTags", "description": "This map defines which will be the common tags that will be added to all the resources created on AWS." }, "toolsConfiguration": { - "$ref": "#/$defs/Spec.ToolsConfiguration" + "$ref": "#/$defs/Spec.ToolsConfiguration", + "description": "Configuration for tools used by furyctl, like Terraform." 
}, "infrastructure": { "$ref": "#/$defs/Spec.Infrastructure" @@ -279,6 +283,7 @@ }, "Spec.Infrastructure.Vpc": { "type": "object", + "description": "Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead.", "additionalProperties": false, "properties": { "network": { @@ -424,6 +429,7 @@ }, "Spec.Kubernetes": { "type": "object", + "description": "Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.", "additionalProperties": false, "properties": { "vpcId": { @@ -916,6 +922,7 @@ }, "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": { "type": "object", + "description": "Port range for the Firewall Rule.", "additionalProperties": false, "properties": { "from": { @@ -1183,7 +1190,8 @@ "description": "Configurations for the nginx ingress controller module" }, "certManager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses." 
 },
       "dns": {
         "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS"
       },
@@ -1280,6 +1288,7 @@
     "Spec.Distribution.Modules.Ingress.Overrides": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Override the common configuration with a particular configuration for the Ingress module.",
       "properties": {
         "ingresses": {
           "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses"
@@ -1326,7 +1335,7 @@
             "single",
             "dual"
           ],
-          "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***"
+          "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.\n\nDefault is `single`."
         },
         "tls": {
           "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS"
@@ -1350,7 +1359,7 @@
             "secret",
             "none"
           ],
-          "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***"
+          "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`."
}, "secret": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" @@ -1375,16 +1384,19 @@ "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { "type": "object", "additionalProperties": false, + "description": "Kubernetes TLS secret for the ingresses TLS certificate.", "properties": { "cert": { "type": "string", - "description": "The certificate file content or you can use the file notation to get the content from a file" + "description": "The certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "key": { - "type": "string" + "type": "string", + "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "ca": { - "type": "string" + "type": "string", + "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." } }, "required": [ @@ -1457,6 +1469,7 @@ }, "Spec.Distribution.Modules.Ingress.DNS": { "type": "object", + "description": "DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission.", "additionalProperties": false, "properties": { "public": { @@ -1513,6 +1526,7 @@ "Spec.Distribution.Modules.Logging": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1525,7 +1539,7 @@ "loki", "customOutputs" ], - "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. 
Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage."
+          "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`."
         },
         "opensearch": {
           "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch"
@@ -1623,6 +1637,7 @@
     },
     "Spec.Distribution.Modules.Logging.Cerebro": {
       "type": "object",
+      "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.",
       "additionalProperties": false,
       "properties": {
         "overrides": {
@@ -1633,6 +1648,7 @@
     "Spec.Distribution.Modules.Logging.Minio": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Configuration for Logging's MinIO deployment.",
       "properties": {
         "storageSize": {
           "type": "string",
@@ -1659,10 +1675,12 @@
     },
     "Spec.Distribution.Modules.Logging.Loki": {
       "type": "object",
+      "description": "Configuration for the Loki package.",
       "additionalProperties": false,
       "properties": {
         "backend": {
           "type": "string",
+          "description": "The storage backend type for Loki. 
`minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.", "enum": [ "minio", "externalEndpoint" @@ -1671,26 +1689,27 @@ "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Loki's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the loki external endpoint" + "description": "External S3-compatible endpoint for Loki's storage." }, "insecure": { "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the loki external endpoint" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the loki external endpoint" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the loki external endpoint" + "description": "The bucket name of the external S3-compatible object storage." 
 }
 }
 },
@@ -1768,7 +1787,7 @@
     "Spec.Distribution.Modules.Monitoring": {
       "type": "object",
       "additionalProperties": false,
-      "description": "configuration for the Monitoring module components",
+      "description": "Configuration for the Monitoring module.",
       "properties": {
         "type": {
           "type": "string",
@@ -1778,7 +1797,7 @@
             "prometheusAgent",
             "mimir"
           ],
-          "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage."
+          "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. 
Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`."
         },
         "overrides": {
           "$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -1863,7 +1882,10 @@
       "type": "object",
       "additionalProperties": false,
       "properties": {
-        "deadManSwitchWebhookUrl": {
+        "resources": {
+          "$ref": "#/$defs/Types.KubeResources"
+        },
+        "deadManSwitchWebhookUrl": {
           "type": "string",
           "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io"
         },
@@ -1871,7 +1893,7 @@
           "type": "boolean",
           "description": "If true, the default rules will be installed"
         },
-        "slackWebhookUrl": {
+        "slackWebhookUrl": {
           "type": "string",
           "description": "The slack webhook url to send alerts"
         }
@@ -1924,10 +1946,11 @@
     "Spec.Distribution.Modules.Monitoring.Mimir": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Configuration for the Mimir package.",
       "properties": {
         "retentionTime": {
           "type": "string",
-          "description": "The retention time for the mimir pods"
+          "description": "The retention time for the logs stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days."
         },
         "backend": {
           "type": "string",
           "enum": [
             "minio",
             "externalEndpoint"
           ],
-          "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***"
+          "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO."
}, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Mimir's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external mimir backend" + "description": "External S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external mimir backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external mimir backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external mimir backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external mimir backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1970,11 +1994,12 @@ }, "Spec.Distribution.Modules.Monitoring.Minio": { "type": "object", + "description": "Configuration for Monitoring's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1982,11 +2007,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." 
} } }, @@ -1998,6 +2023,7 @@ "Spec.Distribution.Modules.Tracing": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tracing module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -2070,6 +2096,7 @@ }, "Spec.Distribution.Modules.Tracing.Minio": { "type": "object", + "description": "Configuration for Tracing's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { @@ -2098,9 +2125,10 @@ "Spec.Distribution.Modules.Networking": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Networking module.", "properties": { "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "$ref": "#/$defs/Types.FuryModuleOverrides" }, "tigeraOperator": { "$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator" @@ -2125,6 +2153,7 @@ "Spec.Distribution.Modules.Policy": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Policy module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -2136,7 +2165,7 @@ "gatekeeper", "kyverno" ], - "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***" + "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." }, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -2182,6 +2211,7 @@ "Spec.Distribution.Modules.Policy.Gatekeeper": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Gatekeeper package.", "properties": { "additionalExcludedNamespaces": { "type": "array", @@ -2197,11 +2227,11 @@ "dryrun", "warn" ], - "description": "The enforcement action to use for the gatekeeper module" + "description": "The default enforcement action to use for the included constraints. 
`deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -2215,13 +2245,14 @@ "Spec.Distribution.Modules.Policy.Kyverno": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Kyverno package.", "properties": { "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" }, - "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." + "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them." }, "validationFailureAction": { "type": "string", @@ -2229,11 +2260,11 @@ "Audit", "Enforce" ], - "description": "The validation failure action to use for the kyverno module" + "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Kyverno policies included with distribution." 
 },
         "overrides": {
           "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
@@ -2373,6 +2404,7 @@
     "Spec.Distribution.Modules.Auth": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Configuration for the Auth module.",
       "properties": {
         "overrides": {
           "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides"
@@ -2526,6 +2558,7 @@
     "Spec.Distribution.Modules.Auth.Provider.BasicAuth": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Configuration for the HTTP Basic Auth provider.",
       "properties": {
         "username": {
           "type": "string",
@@ -2872,6 +2905,41 @@
-    "Types.FuryModuleOverrides": {
+    "Types.KubeResources": {
       "type": "object",
       "additionalProperties": false,
+      "properties": {
+        "requests": {
+          "type": "object",
+          "additionalProperties": false,
+          "properties": {
+            "cpu": {
+              "type": "string",
+              "description": "The CPU request for the Pod, in cores. Example: `500m`."
+            },
+            "memory": {
+              "type": "string",
+              "description": "The memory request for the Pod. Example: `500M`."
+            }
+          }
+        },
+        "limits": {
+          "type": "object",
+          "additionalProperties": false,
+          "properties": {
+            "cpu": {
+              "type": "string",
+              "description": "The CPU limit for the Pod. Example: `1000m`."
+            },
+            "memory": {
+              "type": "string",
+              "description": "The memory limit for the Pod. Example: `1G`."
+            }
+          }
+        }
+      }
+    },
+    "Types.FuryModuleOverrides": {
+      "type": "object",
+      "description": "Override the common configuration with a particular configuration for the module.",
+      "additionalProperties": false,
       "properties": {
         "nodeSelector": {
           "$ref": "#/$defs/Types.KubeNodeSelector",
           "description": 
@@ -2944,11 +3012,11 @@
       "properties": {
         "disableAuth": {
           "type": "boolean",
-          "description": "If true, the ingress will not have authentication"
+          "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth."
         },
         "host": {
           "type": "string",
-          "description": "The host of the ingress"
+          "description": "Use this host for the ingress instead of the default one."
}, "ingressClass": { "type": "string", diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index 7c0f91e64..dae6fd51e 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "A Fury Cluster deployed through AWS's Elastic Kubernetes Service", + "description": "A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS).", "type": "object", "properties": { "apiVersion": { @@ -34,6 +34,7 @@ "properties": { "name": { "type": "string", + "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.", "minLength": 1, "maxLength": 56 } @@ -48,17 +49,20 @@ "properties": { "distributionVersion": { "type": "string", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "region": { - "$ref": "#/$defs/Types.AwsRegion" + "$ref": "#/$defs/Types.AwsRegion", + "description": "Defines in which AWS region the cluster and all the related resources will be created." }, "tags": { "$ref": "#/$defs/Types.AwsTags", "description": "This map defines which will be the common tags that will be added to all the resources created on AWS." }, "toolsConfiguration": { - "$ref": "#/$defs/Spec.ToolsConfiguration" + "$ref": "#/$defs/Spec.ToolsConfiguration", + "description": "Configuration for tools used by furyctl, like Terraform." 
}, "infrastructure": { "$ref": "#/$defs/Spec.Infrastructure" @@ -155,6 +159,7 @@ "Spec.ToolsConfiguration.Terraform.State": { "type": "object", "additionalProperties": false, + "description": "Configuration for storing the Terraform state of the cluster.", "properties": { "s3": { "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State.S3" @@ -167,22 +172,23 @@ "Spec.ToolsConfiguration.Terraform.State.S3": { "type": "object", "additionalProperties": false, + "description": "Configuration for the S3 bucket used to store the Terraform state.", "properties": { "bucketName": { "$ref": "#/$defs/Types.AwsS3BucketName", - "description": "This value defines which bucket will be used to store all the states" + "description": "This value defines which bucket will be used to store all the states." }, "keyPrefix": { "$ref": "#/$defs/Types.AwsS3KeyPrefix", - "description": "This value defines which folder will be used to store all the states inside the bucket" + "description": "This value defines which folder will be used to store all the states inside the bucket." }, "region": { "$ref": "#/$defs/Types.AwsRegion", - "description": "This value defines in which region the bucket is located" + "description": "This value defines in which region the bucket is located." }, "skipRegionValidation": { "type": "boolean", - "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region" + "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region." 
 }
 },
 "required": [
@@ -196,12 +202,10 @@
       "additionalProperties": false,
       "properties": {
         "vpc": {
-          "$ref": "#/$defs/Spec.Infrastructure.Vpc",
-          "description": "This key defines the VPC that will be created in AWS"
+          "$ref": "#/$defs/Spec.Infrastructure.Vpc"
         },
         "vpn": {
-          "$ref": "#/$defs/Spec.Infrastructure.Vpn",
-          "description": "This section defines the creation of VPN bastions"
+          "$ref": "#/$defs/Spec.Infrastructure.Vpn"
         }
       },
       "allOf": [
@@ -279,6 +283,7 @@
     },
     "Spec.Infrastructure.Vpc": {
       "type": "object",
+      "description": "Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead.",
       "additionalProperties": false,
       "properties": {
         "network": {
@@ -295,7 +300,7 @@
       "properties": {
         "cidr": {
           "$ref": "#/$defs/Types.Cidr",
-          "description": "This is the CIDR of the VPC that will be created"
+          "description": "The network CIDR for the VPC that will be created."
         },
         "subnetsCidrs": {
           "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs"
@@ -308,6 +313,7 @@
     },
     "Spec.Infrastructure.Vpc.Network.SubnetsCidrs": {
       "type": "object",
+      "description": "Network CIDRs configuration for private and public subnets.",
       "additionalProperties": false,
       "properties": {
         "private": {
@@ -315,14 +321,14 @@
           "items": {
             "$ref": "#/$defs/Types.Cidr"
           },
-          "description": "These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created"
+          "description": "Network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created."
         },
         "public": {
           "type": "array",
           "items": {
             "$ref": "#/$defs/Types.Cidr"
           },
-          "description": "These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created"
+          "description": "Network CIDRs for the public subnets, where the public load balancers and the VPN servers will be 
created."
         }
       },
       "required": [
@@ -332,50 +338,51 @@
     },
     "Spec.Infrastructure.Vpn": {
       "type": "object",
+      "description": "Configuration for the VPN server instances.",
       "additionalProperties": false,
       "properties": {
         "instances": {
           "type": "integer",
-          "description": "The number of instances to create, 0 to skip the creation"
+          "description": "The number of VPN server instances to create, `0` to skip the creation."
         },
         "port": {
           "$ref": "#/$defs/Types.TcpPort",
-          "description": "The port used by the OpenVPN server"
+          "description": "The port where each OpenVPN server will listen for connections."
         },
         "instanceType": {
           "type": "string",
-          "description": "The size of the AWS EC2 instance"
+          "description": "The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3.micro`."
        },
        "diskSize": {
          "type": "integer",
-          "description": "The size of the disk in GB"
+          "description": "The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB."
        },
        "operatorName": {
          "type": "string",
-          "description": "The username of the account to create in the bastion's operating system"
+          "description": "The username of the account to create in the bastion's operating system."
        },
        "dhParamsBits": {
          "type": "integer",
-          "description": "The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file"
+          "description": "The `dhParamsBits` size used for the creation of the .pem file that will be used in the dh openvpn server.conf file."
        },
        "vpnClientsSubnetCidr": {
          "$ref": "#/$defs/Types.Cidr",
-          "description": "The CIDR that will be used to assign IP addresses to the VPN clients when connected"
+          "description": "The network CIDR that will be used to assign IP addresses to the VPN clients when connected."
}, "ssh": { "$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh" }, "vpcId": { "$ref": "#/$defs/Types.AwsVpcId", - "description": "The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted." }, "bucketNamePrefix": { "$ref": "#/$defs/Types.AwsS3BucketNamePrefix", - "description": "This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states" + "description": "This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users)." }, "iamUserNameOverride": { "$ref": "#/$defs/Types.AwsIamRoleName", - "description": "Overrides the default IAM user name for the VPN" + "description": "Overrides IAM user name for the VPN. Default is to use the cluster name." } }, "required": [ @@ -399,7 +406,7 @@ } ] }, - "description": "This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented" + "description": "**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system." }, "githubUsersName": { "type": "array", @@ -407,14 +414,14 @@ "type": "string" }, "minItems": 1, - "description": "The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user" + "description": "List of GitHub usernames from whom get their SSH public key and add as authorized keys of the `operatorName` user." }, "allowedFromCidrs": { "type": "array", "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "The CIDR enabled in the security group that can access the bastions in SSH" + "description": "The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. 
Setting this to `0.0.0.0/0` will allow any source." } }, "required": [ @@ -424,33 +431,34 @@ }, "Spec.Kubernetes": { "type": "object", + "description": "Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.", "additionalProperties": false, "properties": { "vpcId": { "$ref": "#/$defs/Types.AwsVpcId", - "description": "This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created." }, "clusterIAMRoleNamePrefixOverride": { "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default IAM role name prefix for the EKS cluster" + "description": "Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name." }, "workersIAMRoleNamePrefixOverride": { "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default IAM role name prefix for the EKS workers" + "description": "Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name." }, "subnetIds": { "type": "array", "items": { "$ref": "#/$defs/Types.AwsSubnetId" }, - "description": "This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the IDs of the subnets where the EKS cluster will be created." 
}, "apiServer": { "$ref": "#/$defs/Spec.Kubernetes.APIServer" }, "serviceIpV4Cidr": { "$ref": "#/$defs/Types.Cidr", - "description": "This value defines the CIDR that will be used to assign IP addresses to the services" + "description": "This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services." }, "nodeAllowedSshPublicKey": { "anyOf": [ @@ -461,7 +469,7 @@ "$ref": "#/$defs/Types.FileRef" } ], - "description": "This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user" + "description": "The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_rsa.pub` file." }, "nodePoolsLaunchKind": { "type": "string", @@ -470,7 +478,7 @@ "launch_configurations", "launch_templates", "both" ], - "description": "Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." + "description": "Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting an existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." }, "nodePoolGlobalAmiType": { "type": "string", @@ -482,7 +490,32 @@ }, "logRetentionDays": { "type": "integer", - "description": "Optional Kubernetes Cluster log retention in days. Defaults to 90 days." + "description": "Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. 
Default is `90` days.", + "enum": [ + 0, + 1, + 3, + 5, + 7, + 14, + 30, + 60, + 90, + 120, + 150, + 180, + 365, + 400, + 545, + 731, + 1096, + 1827, + 2192, + 2557, + 2922, + 3288, + 3653 + ] }, "logsTypes": { "type": "array", @@ -522,7 +555,7 @@ "properties": { "privateAccess": { "type": "boolean", - "description": "This value defines if the API server will be accessible only from the private subnets" + "description": "This value defines if the Kubernetes API server will be accessible from the private subnets. Default is `true`." }, "privateAccessCidrs": { "type": "array", @@ -530,7 +563,7 @@ "$ref": "#/$defs/Types.Cidr" }, "minItems": 0, - "description": "This value defines the CIDRs that will be allowed to access the API server from the private subnets" + "description": "The network CIDRs from the private subnets that will be allowed to access the Kubernetes API server." }, "publicAccessCidrs": { "type": "array", @@ -538,11 +571,11 @@ "$ref": "#/$defs/Types.Cidr" }, "minItems": 0, - "description": "This value defines the CIDRs that will be allowed to access the API server from the public subnets" + "description": "The network CIDRs from the public subnets that will be allowed to access the Kubernetes API server." }, "publicAccess": { "type": "boolean", - "description": "This value defines if the API server will be accessible from the public subnets" + "description": "This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`." } }, "required": [ @@ -553,6 +586,7 @@ "Spec.Kubernetes.NodePool": { "type": "object", "additionalProperties": false, + "description": "Array with all the node pool definitions that will join the cluster. Each item is an object.", "properties": { "type": { "description": "The type of Node Pool, can be `self-managed` for using customization like custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. 
It is recommended to use `self-managed`.", @@ -575,7 +609,7 @@ "docker", "containerd" ], - "description": "The container runtime to use for the nodes" + "description": "The container runtime to use in the nodes of the node pool. Default is `containerd`." }, "size": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.Size" @@ -588,26 +622,26 @@ "items": { "$ref": "#/$defs/Types.AwsArn" }, - "description": "This optional array defines additional target groups to attach to the instances in the node pool" + "description": "This optional array defines additional target groups to attach to the instances in the node pool." }, "labels": { "$ref": "#/$defs/Types.KubeLabels", - "description": "Kubernetes labels that will be added to the nodes" + "description": "Kubernetes labels that will be added to the nodes." }, "taints": { "$ref": "#/$defs/Types.KubeTaints", - "description": "Kubernetes taints that will be added to the nodes" + "description": "Kubernetes taints that will be added to the nodes." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "AWS tags that will be added to the ASG and EC2 instances" + "description": "AWS tags that will be added to the ASG and EC2 instances." }, "subnetIds": { "type": "array", "items": { "$ref": "#/$defs/Types.AwsSubnetId" }, - "description": "This value defines the subnet IDs where the nodes will be created" + "description": "Optional list of subnet IDs where to create the nodes." }, "additionalFirewallRules": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRules" @@ -717,21 +751,23 @@ "Spec.Kubernetes.NodePool.Instance": { "type": "object", "additionalProperties": false, + "description": "Configuration for the instances that will be used in the node pool.", "properties": { "type": { "type": "string", - "description": "The instance type to use for the nodes" + "description": "The instance type to use for the nodes." 
}, "spot": { "type": "boolean", - "description": "If true, the nodes will be created as spot instances" + "description": "If `true`, the nodes will be created as spot instances. Default is `false`." }, "volumeSize": { "type": "integer", - "description": "The size of the disk in GB" + "description": "The size of the disk in GB." }, "volumeType": { "type": "string", + "description": "Volume type for the instance disk. Default is `gp2`.", "enum": [ "gp2", "gp3", @@ -740,7 +776,8 @@ ] }, "maxPods": { - "type": "integer" + "type": "integer", + "description": "Set the maximum pods per node to a custom value. If not set will use EKS default value that depends on the instance type.\n\nRef: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt" } }, "required": [ @@ -754,12 +791,12 @@ "min": { "type": "integer", "minimum": 0, - "description": "The minimum number of nodes in the node pool" + "description": "The minimum number of nodes in the node pool." }, "max": { "type": "integer", "minimum": 0, - "description": "The maximum number of nodes in the node pool" + "description": "The maximum number of nodes in the node pool." } }, "required": [ @@ -770,6 +807,7 @@ "Spec.Kubernetes.NodePool.AdditionalFirewallRules": { "type": "object", "additionalProperties": false, + "description": "Optional additional firewall rules that will be attached to the nodes.", "properties": { "cidrBlocks": { "type": "array", @@ -777,7 +815,8 @@ "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock" }, "minItems": 1, - "description": "The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored." + "maxItems": 1, + "description": "The CIDR blocks objects definition for the Firewall rule. Even though it is a list, only one item is currently supported. See https://github.com/sighupio/fury-eks-installer/issues/46 for more details." 
}, "sourceSecurityGroupId": { "type": "array", @@ -804,13 +843,15 @@ }, "type": { "type": "string", + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic.", "enum": [ "ingress", "egress" ] }, "tags": { - "$ref": "#/$defs/Types.AwsTags" + "$ref": "#/$defs/Types.AwsTags", + "description": "Additional AWS tags for the Firewall rule." }, "cidrBlocks": { "type": "array", @@ -840,7 +881,7 @@ "properties": { "name": { "type": "string", - "description": "The name of the FW rule" + "description": "The name for the additional Firewall rule Security Group." }, "type": { "type": "string", @@ -848,19 +889,19 @@ "ingress", "egress" ], - "description": "The type of the FW rule can be ingress or egress" + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "description": "Additional AWS tags for the Firewall rule." }, "sourceSecurityGroupId": { "type": "string", - "description": "The source security group ID" + "description": "The source security group ID." }, "protocol": { "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "description": "The protocol of the Firewall rule." }, "ports": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" @@ -880,7 +921,7 @@ "properties": { "name": { "type": "string", - "description": "The name of the FW rule" + "description": "The name of the Firewall rule." }, "type": { "type": "string", @@ -888,19 +929,19 @@ "ingress", "egress" ], - "description": "The type of the FW rule can be ingress or egress" + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "description": "Additional AWS tags for the Firewall rule." 
}, "self": { "type": "boolean", - "description": "If true, the source will be the security group itself" + "description": "If `true`, the source will be the security group itself." }, "protocol": { "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "description": "The protocol of the Firewall rule." }, "ports": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" @@ -916,6 +957,7 @@ }, "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": { "type": "object", + "description": "Port range for the Firewall Rule.", "additionalProperties": false, "properties": { "from": { @@ -932,6 +974,7 @@ }, "Spec.Kubernetes.AwsAuth": { "type": "object", + "description": "Optional additional security configuration for EKS IAM via the `aws-auth` configmap.\n\nRef: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html", "additionalProperties": false, "properties": { "additionalAccounts": { @@ -939,21 +982,21 @@ "items": { "type": "string" }, - "description": "This optional array defines additional AWS accounts that will be added to the aws-auth configmap" + "description": "This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap." }, "users": { "type": "array", "items": { "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.User" }, - "description": "This optional array defines additional IAM users that will be added to the aws-auth configmap" + "description": "This optional array defines additional IAM users that will be added to the `aws-auth` configmap." }, "roles": { "type": "array", "items": { "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.Role" }, - "description": "This optional array defines additional IAM roles that will be added to the aws-auth configmap" + "description": "This optional array defines additional IAM roles that will be added to the `aws-auth` configmap." 
} } }, @@ -1090,28 +1133,29 @@ "Spec.Distribution.Common": { "type": "object", "additionalProperties": false, + "description": "Common configuration for all the distribution modules.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for all the KFD modules" + "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```" }, "provider": { "$ref": "#/$defs/Spec.Distribution.Common.Provider" }, "relativeVendorPath": { "type": "string", - "description": "The relative path to the vendor directory, does not need to be changed" + "description": "The relative path to the vendor directory, does not need to be changed." }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`)." } } }, @@ -1121,7 +1165,7 @@ "properties": { "type": { "type": "string", - "description": "The type of the provider, must be EKS if specified" + "description": "The provider type. Don't set. FOR INTERNAL USE ONLY." 
} }, "required": [ @@ -1176,14 +1220,15 @@ }, "baseDomain": { "type": "string", - "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" + "description": "The base domain used for all the KFD ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone." }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller module" + "description": "Configurations for the Ingress nginx controller package." }, "certManager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses." }, "dns": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS" @@ -1275,20 +1320,21 @@ "Spec.Distribution.Modules.Ingress.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Ingress module.", "properties": { "ingresses": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the ingress module" + "description": "Set to override the node selector used to place the pods of the Ingress module." 
}, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the ingress module" + "description": "Set to override the tolerations that will be added to the pods of the Ingress module." } } }, @@ -1321,7 +1367,7 @@ "single", "dual" ], - "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" + "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.\n\nDefault is `single`." }, "tls": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" @@ -1345,7 +1391,7 @@ "secret", "none" ], - "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" + "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`." }, "secret": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" @@ -1370,16 +1416,19 @@ "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { "type": "object", "additionalProperties": false, + "description": "Kubernetes TLS secret for the ingresses TLS certificate.", "properties": { "cert": { "type": "string", - "description": "The certificate file content or you can use the file notation to get the content from a file" + "description": "The certificate file's content. 
You can use the `\"{file://}\"` notation to get the content from a file." }, "key": { - "type": "string" + "type": "string", + "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "ca": { - "type": "string" + "type": "string", + "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." } }, "required": [ @@ -1391,6 +1440,7 @@ "Spec.Distribution.Modules.Ingress.CertManager": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.", "properties": { "clusterIssuer": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" @@ -1406,15 +1456,16 @@ "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.", "properties": { "name": { "type": "string", - "description": "The name of the cluster issuer" + "description": "Name of the clusterIssuer." }, "email": { "type": "string", "format": "email", - "description": "The email of the cluster issuer" + "description": "The email address to use during the certificate issuing process." }, "type": { "type": "string", @@ -1422,11 +1473,11 @@ "dns01", "http01" ], - "description": "The type of the cluster issuer, must be ***dns01*** or ***http01***" + "description": "The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge." 
}, "solvers": { "type": "array", - "description": "The custom solvers configurations" + "description": "List of challenge solvers to use instead of the default one for the `http01` challenge." } }, "required": [ @@ -1448,6 +1499,7 @@ }, "Spec.Distribution.Modules.Ingress.DNS": { "type": "object", + "description": "DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission.", "additionalProperties": false, "properties": { "public": { @@ -1467,11 +1519,11 @@ "properties": { "name": { "type": "string", - "description": "The name of the public hosted zone" + "description": "The name of the public hosted zone." }, "create": { "type": "boolean", - "description": "If true, the public hosted zone will be created" + "description": "By default, a Terraform data source will be used to get the public DNS zone. Set to `true` to create the public zone instead." } }, "required": [ @@ -1481,15 +1533,16 @@ }, "Spec.Distribution.Modules.Ingress.DNS.Private": { "type": "object", + "description": "The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone.", "additionalProperties": false, "properties": { "name": { "type": "string", - "description": "The name of the private hosted zone" + "description": "The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`." }, "create": { "type": "boolean", - "description": "If true, the private hosted zone will be created" + "description": "By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead." 
} }, "required": [ @@ -1500,6 +1553,7 @@ "Spec.Distribution.Modules.Logging": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1512,7 +1566,7 @@ "loki", "customOutputs" ], - "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." + "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." }, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -1591,14 +1645,14 @@ "single", "triple" ], - "description": "The type of the opensearch, must be ***single*** or ***triple***" + "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment." }, "resources": { "$ref": "#/$defs/Types.KubeResources" }, "storageSize": { "type": "string", - "description": "The storage size for the opensearch pods" + "description": "The storage size for the OpenSearch volumes." 
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1610,6 +1664,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", + "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.", "additionalProperties": false, "properties": { "overrides": { @@ -1620,10 +1675,11 @@ "Spec.Distribution.Modules.Logging.Minio": { "type": "object", "additionalProperties": false, + "description": "Configuration for Logging's MinIO deployment.", "properties": { "storageSize": { "type": "string", - "description": "The PVC size for each minio disk, 6 disks total" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1631,11 +1687,11 @@ "properties": { "username": { "type": "string", - "description": "The username of the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password of the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1646,10 +1702,12 @@ }, "Spec.Distribution.Modules.Logging.Loki": { "type": "object", + "description": "Configuration for the Loki package.", "additionalProperties": false, "properties": { "backend": { "type": "string", + "description": "The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.", "enum": [ "minio", "externalEndpoint" @@ -1658,26 +1716,27 @@ "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Loki's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the loki external endpoint" + "description": "External S3-compatible endpoint for Loki's storage." 
}, "insecure": { "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the loki external endpoint" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the loki external endpoint" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the loki external endpoint" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1697,6 +1756,7 @@ "Spec.Distribution.Modules.Logging.Operator": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging Operator.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1704,41 +1764,41 @@ } }, "Spec.Distribution.Modules.Logging.CustomOutputs": { - "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.", "type": "object", "additionalProperties": false, "properties": { "audit": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." 
+ "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "events": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "infra": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "ingressNginx": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}`" }, "kubernetes": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdCommon": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdEtcd": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "errors": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. 
It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" } }, "required": [ @@ -1755,7 +1815,7 @@ "Spec.Distribution.Modules.Monitoring": { "type": "object", "additionalProperties": false, - "description": "configuration for the Monitoring module components", + "description": "Configuration for the Monitoring module.", "properties": { "type": { "type": "string", @@ -1765,7 +1825,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." 
+ "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" }, @@ -1811,15 +1871,15 @@ }, "retentionTime": { "type": "string", - "description": "The retention time for the k8s Prometheus instance." + "description": "The retention time for the `k8s` Prometheus instance." }, "retentionSize": { "type": "string", - "description": "The retention size for the k8s Prometheus instance." + "description": "The retention size for the `k8s` Prometheus instance." }, "storageSize": { "type": "string", - "description": "The storage size for the k8s Prometheus instance." + "description": "The storage size for the `k8s` Prometheus instance." }, "remoteWrite": { "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus.
The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", @@ -1852,15 +1912,15 @@ "properties": { "deadManSwitchWebhookUrl": { "type": "string", - "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io" + "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io." }, "installDefaultRules": { "type": "boolean", - "description": "If true, the default rules will be installed" + "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution." }, "slackWebhookUrl": { "type": "string", - "description": "The slack webhook url to send alerts" + "description": "The Slack webhook URL where to send the infrastructural and workload alerts to." } } }, @@ -1911,10 +1971,11 @@ "Spec.Distribution.Modules.Monitoring.Mimir": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Mimir package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the mimir pods" + "description": "The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days." }, "backend": { "type": "string", @@ -1922,31 +1983,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO."
}, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Mimir's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external mimir backend" + "description": "External S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external mimir backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external mimir backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external mimir backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external mimir backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1957,11 +2019,12 @@ }, "Spec.Distribution.Modules.Monitoring.Minio": { "type": "object", + "description": "Configuration for Monitoring's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1969,11 +2032,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." 
} } }, @@ -1985,6 +2048,7 @@ "Spec.Distribution.Modules.Tracing": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tracing module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1995,7 +2059,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either ***none*** or ***tempo***" + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -2011,10 +2075,11 @@ "Spec.Distribution.Modules.Tracing.Tempo": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tempo package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the tempo pods" + "description": "The retention time for the traces stored in Tempo." }, "backend": { "type": "string", @@ -2022,31 +2087,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { + "description": "Configuration for Tempo's external storage backend.", "type": "object", "additionalProperties": false, "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external tempo backend" + "description": "External S3-compatible endpoint for Tempo's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external tempo backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." 
}, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external tempo backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external tempo backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external tempo backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -2057,11 +2123,12 @@ }, "Spec.Distribution.Modules.Tracing.Minio": { "type": "object", + "description": "Configuration for Tracing's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -2069,11 +2136,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." 
} } }, @@ -2085,9 +2152,10 @@ "Spec.Distribution.Modules.Networking": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Networking module.", "properties": { "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "$ref": "#/$defs/Types.FuryModuleOverrides" }, "tigeraOperator": { "$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator" @@ -2106,6 +2174,7 @@ "Spec.Distribution.Modules.Policy": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Policy module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -2117,7 +2186,7 @@ "gatekeeper", "kyverno" ], - "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***" + "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." }, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -2163,6 +2232,7 @@ "Spec.Distribution.Modules.Policy.Gatekeeper": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Gatekeeper package.", "properties": { "additionalExcludedNamespaces": { "type": "array", @@ -2178,11 +2248,11 @@ "dryrun", "warn" ], - "description": "The enforcement action to use for the gatekeeper module" + "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution." 
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" }, @@ -2196,13 +2266,14 @@ "Spec.Distribution.Modules.Policy.Kyverno": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Kyverno package.", "properties": { "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" }, - "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." + "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them." }, "validationFailureAction": { "type": "string", @@ -2210,11 +2281,11 @@ "Audit", "Enforce" ], - "description": "The validation failure action to use for the kyverno module" + "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Kyverno policies included with the distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" }, @@ -2228,6 +2299,7 @@ "Spec.Distribution.Modules.Dr": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Disaster Recovery module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" }, @@ -2238,7 +2310,7 @@ "none", "eks" ], - "description": "The type of the DR, must be ***none*** or ***eks***" + "description": "The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups.\n\nDefault is `none`."
}, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -2334,12 +2406,12 @@ "properties": { "region": { "$ref": "#/$defs/Types.AwsRegion", - "description": "The region where the velero bucket is located" + "description": "The region where the bucket for Velero will be located." }, "bucketName": { "$ref": "#/$defs/Types.AwsS3BucketName", "maxLength": 49, - "description": "The name of the velero bucket" + "description": "The name of the bucket for Velero." } }, "required": [ @@ -2350,6 +2422,7 @@ "Spec.Distribution.Modules.Auth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Auth module.", "properties": { "overrides": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" @@ -2359,7 +2432,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain for the auth module" + "description": "Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class." }, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" @@ -2438,10 +2511,11 @@ "Spec.Distribution.Modules.Auth.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Auth module.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the auth module" + "description": "Set to override the node selector used to place the pods of the Auth module." }, "tolerations": { "type": [ @@ -2451,10 +2525,11 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the auth module" + "description": "Set to override the tolerations that will be added to the pods of the Auth module." 
}, "ingresses": { "type": "object", + "description": "Override the definition of the Auth module ingresses.", "additionalProperties": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" } @@ -2467,11 +2542,11 @@ "properties": { "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } }, "required": [ @@ -2490,7 +2565,7 @@ "basicAuth", "sso" ], - "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." }, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -2503,14 +2578,15 @@ "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the HTTP Basic Auth provider.", "properties": { "username": { "type": "string", - "description": "The username for the basic auth" + "description": "The username for logging in with the HTTP basic authentication." }, "password": { "type": "string", - "description": "The password for the basic auth" + "description": "The password for logging in with the HTTP basic authentication." 
} }, "required": [ @@ -2524,14 +2600,15 @@ "Spec.Distribution.Modules.Auth.Dex": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Dex package.", "properties": { "connectors": { "type": "array", - "description": "The connectors for dex" + "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/" }, "additionalStaticClients": { "type": "array", - "description": "The additional static clients for dex" + "description": "Additional static clients definitions that will be added to the default clients included with the distribution in Dex's configuration. Example:\n\n```yaml\nadditionalStaticClients:\n - id: my-custom-client\n name: \"A custom additional static client\"\n redirectURIs:\n - \"https://myapp.tld/redirect\"\n - \"https://alias.tld/oidc-callback\"\n secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/" }, "expiry": { "type": "object", @@ -2810,11 +2887,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu request for the prometheus pods" + "description": "The CPU request for the Pod, in cores. Example: `500m`." }, "memory": { "type": "string", - "description": "The memory request for the opensearch pods" + "description": "The memory request for the Pod. Example: `500M`." } } }, @@ -2824,11 +2901,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu limit for the opensearch pods" + "description": "The CPU limit for the Pod. Example: `1000m`." }, "memory": { "type": "string", - "description": "The memory limit for the opensearch pods" + "description": "The memory limit for the Pod. Example: `1G`."
} } } @@ -2836,11 +2913,12 @@ }, "Types.FuryModuleOverrides": { "type": "object", + "description": "Override the common configuration with a particular configuration for the module.", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the dr module" + "description": "Set to override the node selector used to place the pods of the module." }, "tolerations": { "type": [ @@ -2850,7 +2928,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the monitoring module" + "description": "Set to override the tolerations that will be added to the pods of the module." }, "ingresses": { "type": "object", @@ -2866,7 +2944,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the minio module" + "description": "Set to override the node selector used to place the pods of the package." }, "tolerations": { "type": [ @@ -2876,7 +2954,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cert-manager module" + "description": "Set to override the tolerations that will be added to the pods of the package." } } }, @@ -2886,7 +2964,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the load balancer controller module" + "description": "The node selector to use to place the pods for the load balancer controller module." }, "tolerations": { "type": [ @@ -2896,7 +2974,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cluster autoscaler module" + "description": "The tolerations that will be added to the pods for the cluster autoscaler module." 
}, "iamRoleName": { "$ref": "#/$defs/Types.AwsIamRoleName" @@ -2909,15 +2987,15 @@ "properties": { "disableAuth": { "type": "boolean", - "description": "If true, the ingress will not have authentication" + "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth." }, "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } } } diff --git a/schemas/public/kfddistribution-kfd-v1alpha2.json b/schemas/public/kfddistribution-kfd-v1alpha2.json index cd7c39b75..80cf9d6b9 100644 --- a/schemas/public/kfddistribution-kfd-v1alpha2.json +++ b/schemas/public/kfddistribution-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "", + "description": "KFD modules deployed on top of an existing Kubernetes cluster.", "type": "object", "properties": { "apiVersion": { @@ -34,6 +34,7 @@ "properties": { "name": { "type": "string", + "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.", "minLength": 1, "maxLength": 56 } @@ -48,6 +49,7 @@ "properties": { "distributionVersion": { "type": "string", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "distribution": { @@ -68,7 +70,7 @@ "properties": { "kubeconfig": { "type": "string", - "description": "The kubeconfig file path" + "description": "The path to the kubeconfig file." 
}, "common": { "$ref": "#/$defs/Spec.Distribution.Common" @@ -134,28 +136,29 @@ "Spec.Distribution.Common": { "type": "object", "additionalProperties": false, + "description": "Common configuration for all the distribution modules.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for all the KFD modules" + "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```" }, "provider": { "$ref": "#/$defs/Spec.Distribution.Common.Provider" }, "relativeVendorPath": { "type": "string", - "description": "The relative path to the vendor directory, does not need to be changed" + "description": "The relative path to the vendor directory, does not need to be changed." }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." } } }, @@ -165,7 +168,7 @@ "properties": { "type": { "type": "string", - "description": "The type of the provider" + "description": "The provider type. Don't set. FOR INTERNAL USE ONLY." 
} }, "required": [ @@ -217,14 +220,15 @@ }, "baseDomain": { "type": "string", - "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" + "description": "The base domain used for all the KFD infrastructural ingresses. If using the nginx dual type, this value should be the same as the domain associated with the `internal` ingress class." }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller module" + "description": "Configurations for the Ingress nginx controller package." }, "certManager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses." }, "forecastle": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Forecastle" @@ -258,20 +262,21 @@ "Spec.Distribution.Modules.Ingress.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Ingress module.", "properties": { "ingresses": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the ingress module" + "description": "Set to override the node selector used to place the pods of the Ingress module." 
}, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the ingress module" + "description": "Set to override the tolerations that will be added to the pods of the Ingress module." } } }, @@ -304,7 +309,7 @@ "single", "dual" ], - "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" + "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type.\n\nDefault is `single`." }, "tls": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" @@ -328,7 +333,7 @@ "secret", "none" ], - "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" + "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`." }, "secret": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" @@ -353,16 +358,19 @@ "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { "type": "object", "additionalProperties": false, + "description": "Kubernetes TLS secret for the ingresses TLS certificate.", "properties": { "cert": { "type": "string", - "description": "The certificate file content or you can use the file notation to get the content from a file" + "description": "The certificate file's content. 
You can use the `\"{file://}\"` notation to get the content from a file." }, "key": { - "type": "string" + "type": "string", + "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "ca": { - "type": "string" + "type": "string", + "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." } }, "required": [ @@ -374,6 +382,7 @@ "Spec.Distribution.Modules.Ingress.CertManager": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.", "properties": { "clusterIssuer": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" @@ -389,26 +398,27 @@ "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.", "properties": { "name": { "type": "string", - "description": "The name of the cluster issuer" + "description": "Name of the clusterIssuer." }, "email": { "type": "string", "format": "email", - "description": "The email of the cluster issuer" + "description": "The email address to use during the certificate issuing process." }, "type": { "type": "string", "enum": [ "http01" ], - "description": "The type of the cluster issuer, must be ***http01***" + "description": "The type of the clusterIssuer. Only `http01` challenge is supported for KFDDistribution kind. See solvers for arbitrary configurations." }, "solvers": { "type": "array", - "description": "The custom solvers configurations" + "description": "List of challenge solvers to use instead of the default one for the `http01` challenge." 
} }, "required": [ @@ -431,6 +441,7 @@ "Spec.Distribution.Modules.Logging": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" }, @@ -443,7 +454,7 @@ "loki", "customOutputs" ], - "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." + "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." }, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" }, @@ -522,14 +533,14 @@ "single", "triple" ], - "description": "The type of the opensearch, must be ***single*** or ***triple***" + "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment." }, "resources": { "$ref": "#/$defs/Types.KubeResources" }, "storageSize": { "type": "string", - "description": "The storage size for the opensearch pods" + "description": "The storage size for the OpenSearch volumes."
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -541,6 +552,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", + "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.", "additionalProperties": false, "properties": { "overrides": { @@ -551,10 +563,11 @@ "Spec.Distribution.Modules.Logging.Minio": { "type": "object", "additionalProperties": false, + "description": "Configuration for Logging's MinIO deployment.", "properties": { "storageSize": { "type": "string", - "description": "The PVC size for each minio disk, 6 disks total" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -562,11 +575,11 @@ "properties": { "username": { "type": "string", - "description": "The username of the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password of the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -577,10 +590,12 @@ }, "Spec.Distribution.Modules.Logging.Loki": { "type": "object", + "description": "Configuration for the Loki package.", "additionalProperties": false, "properties": { "backend": { "type": "string", + "description": "The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.", "enum": [ "minio", "externalEndpoint" @@ -589,26 +604,27 @@ "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Loki's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the loki external endpoint" + "description": "External S3-compatible endpoint for Loki's storage." 
}, "insecure": { "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the loki external endpoint" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the loki external endpoint" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the loki external endpoint" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -628,6 +644,7 @@ "Spec.Distribution.Modules.Logging.Operator": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging Operator.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -635,41 +652,41 @@ } }, "Spec.Distribution.Modules.Logging.CustomOutputs": { - "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.", "type": "object", "additionalProperties": false, "properties": { "audit": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." 
+ "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "events": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "infra": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "ingressNginx": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}`" }, "kubernetes": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdCommon": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdEtcd": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "errors": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. 
It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" } }, "required": [ @@ -686,7 +703,7 @@ "Spec.Distribution.Modules.Monitoring": { "type": "object", "additionalProperties": false, - "description": "configuration for the Monitoring module components", + "description": "Configuration for the Monitoring module.", "properties": { "type": { "type": "string", @@ -696,7 +713,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." 
+ "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -742,15 +759,15 @@ }, "retentionTime": { "type": "string", - "description": "The retention time for the K8s Prometheus instance." + "description": "The retention time for the `k8s` Prometheus instance." }, "retentionSize": { "type": "string", - "description": "The retention size for the k8s Prometheus instance." + "description": "The retention size for the `k8s` Prometheus instance." }, "storageSize": { "type": "string", - "description": "The storage size for the k8s Prometheus instance." + "description": "The storage size for the `k8s` Prometheus instance." }, "remoteWrite": { "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus.
The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", @@ -783,15 +800,15 @@ "properties": { "deadManSwitchWebhookUrl": { "type": "string", - "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io" + "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io." }, "installDefaultRules": { "type": "boolean", - "description": "If true, the default rules will be installed" + "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution." }, "slackWebhookUrl": { "type": "string", - "description": "The slack webhook url to send alerts" + "description": "The Slack webhook URL where to send the infrastructural and workload alerts to." } } }, @@ -842,10 +859,11 @@ "Spec.Distribution.Modules.Monitoring.Mimir": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Mimir package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the mimir pods" + "description": "The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days." }, "backend": { "type": "string", @@ -853,31 +871,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO."
}, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Mimir's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external mimir backend" + "description": "External S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external mimir backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external mimir backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external mimir backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external mimir backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -888,11 +907,12 @@ }, "Spec.Distribution.Modules.Monitoring.Minio": { "type": "object", + "description": "Configuration for Monitoring's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -900,11 +920,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." 
} } }, @@ -916,6 +936,7 @@ "Spec.Distribution.Modules.Tracing": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tracing module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -926,7 +947,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either ***none*** or ***tempo***" + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -942,10 +963,11 @@ "Spec.Distribution.Modules.Tracing.Tempo": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tempo package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the tempo pods" + "description": "The retention time for the traces stored in Tempo." }, "backend": { "type": "string", @@ -953,31 +975,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { + "description": "Configuration for Tempo's external storage backend.", "type": "object", "additionalProperties": false, "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external tempo backend" + "description": "External S3-compatible endpoint for Tempo's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external tempo backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." 
}, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external tempo backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external tempo backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external tempo backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -988,11 +1011,12 @@ }, "Spec.Distribution.Modules.Tracing.Minio": { "type": "object", + "description": "Configuration for Tracing's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1000,11 +1024,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1016,6 +1040,7 @@ "Spec.Distribution.Modules.Networking": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Networking module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1033,7 +1058,7 @@ "calico", "cilium" ], - "description": "The type of networking to use, either ***none***, ***calico*** or ***cilium***" + "description": "The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or `cilium`." 
} }, "required": [ @@ -1070,10 +1095,12 @@ "additionalProperties": false, "properties": { "podCidr": { - "$ref": "#/$defs/Types.Cidr" + "$ref": "#/$defs/Types.Cidr", + "description": "Allows specifying a CIDR for the Pods network different from `.spec.kubernetes.podCidr`. If not set the default is to use `.spec.kubernetes.podCidr`." }, "maskSize": { - "type": "string" + "type": "string", + "description": "The mask size to use for the Pods network on each node." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1087,6 +1114,7 @@ "Spec.Distribution.Modules.Policy": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Policy module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1098,7 +1126,7 @@ "gatekeeper", "kyverno" ], - "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***" + "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." }, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -1144,6 +1172,7 @@ "Spec.Distribution.Modules.Policy.Gatekeeper": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Gatekeeper package.", "properties": { "additionalExcludedNamespaces": { "type": "array", @@ -1159,11 +1188,11 @@ "dryrun", "warn" ], - "description": "The enforcement action to use for the gatekeeper module" + "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations."
}, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraint templates and constraints) included with the distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1177,13 +1206,14 @@ "Spec.Distribution.Modules.Policy.Kyverno": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Kyverno package.", "properties": { "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" }, - "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." + "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them." }, "validationFailureAction": { "type": "string", @@ -1191,11 +1221,11 @@ "Audit", "Enforce" ], - "description": "The validation failure action to use for the kyverno module" + "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Kyverno policies included with the distribution."
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1209,6 +1239,7 @@ "Spec.Distribution.Modules.Dr": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Disaster Recovery module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1219,7 +1250,7 @@ "none", "on-premises" ], - "description": "The type of the DR, must be ***none*** or ***on-premises***" + "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment.\n\nDefault is `none`." }, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -1245,6 +1276,7 @@ "Spec.Distribution.Modules.Dr.Velero": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Velero package.", "properties": { "backend": { "type": "string", @@ -1352,6 +1384,7 @@ "Spec.Distribution.Modules.Auth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Auth module.", "properties": { "overrides": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" @@ -1361,7 +1394,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain for the auth module" + "description": "Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class." 
}, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" @@ -1440,10 +1473,11 @@ "Spec.Distribution.Modules.Auth.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Auth module.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the auth module" + "description": "Set to override the node selector used to place the pods of the Auth module." }, "tolerations": { "type": [ @@ -1453,7 +1487,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the auth module" + "description": "Set to override the tolerations that will be added to the pods of the Auth module." }, "ingresses": { "type": "object", @@ -1469,11 +1503,11 @@ "properties": { "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } }, "required": [ @@ -1492,7 +1526,7 @@ "basicAuth", "sso" ], - "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." 
}, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -1505,14 +1539,15 @@ "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the HTTP Basic Auth provider.", "properties": { "username": { "type": "string", - "description": "The username for the basic auth" + "description": "The username for logging in with the HTTP basic authentication." }, "password": { "type": "string", - "description": "The password for the basic auth" + "description": "The password for logging in with the HTTP basic authentication." } }, "required": [ @@ -1526,14 +1561,15 @@ "Spec.Distribution.Modules.Auth.Dex": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Dex package.", "properties": { "connectors": { "type": "array", - "description": "The connectors for dex" + "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/" }, "additionalStaticClients": { "type": "array", - "description": "The additional static clients for dex" + "description": "Additional static clients definitions that will be added to the default clients included with the distribution in Dex's configuration.
Example:\n\n```yaml\nadditionalStaticClients:\n - id: my-custom-client\n name: \"A custom additional static client\"\n redirectURIs:\n - \"https://myapp.tld/redirect\"\n - \"https://alias.tld/oidc-callback\"\n secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/" }, "expiry": { "type": "object", @@ -1597,11 +1633,29 @@ } }, "Types.KubeTaints": { - "type": "array", - "items": { - "type": "string", - "pattern": "^([a-zA-Z0-9\\-\\.\\/]+)=(\\w+):(NoSchedule|PreferNoSchedule|NoExecute)$" - } + "type": "object", + "additionalProperties": false, + "properties": { + "effect": { + "type": "string", + "enum": [ + "NoSchedule", + "PreferNoSchedule", + "NoExecute" + ] + }, + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "effect", + "key", + "value" + ] }, "Types.KubeNodeSelector": { "type": [ @@ -1667,11 +1721,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu request for the prometheus pods" + "description": "The CPU request for the Pod, in cores. Example: `500m`." }, "memory": { "type": "string", - "description": "The memory request for the opensearch pods" + "description": "The memory request for the Pod. Example: `500M`." } } }, @@ -1681,11 +1735,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu limit for the loki pods" + "description": "The CPU limit for the Pod. Example: `1000m`." }, "memory": { "type": "string", - "description": "The memory limit for the opensearch pods" + "description": "The memory limit for the Pod. Example: `1G`." 
} } } @@ -1693,11 +1747,12 @@ }, "Types.FuryModuleOverrides": { "type": "object", + "description": "Override the common configuration with a particular configuration for the module.", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the security module" + "description": "Set to override the node selector used to place the pods of the module." }, "tolerations": { "type": [ @@ -1707,7 +1762,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the monitoring module" + "description": "Set to override the tolerations that will be added to the pods of the module." }, "ingresses": { "type": "object", @@ -1723,7 +1778,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the minio module" + "description": "Set to override the node selector used to place the pods of the package." }, "tolerations": { "type": [ @@ -1733,7 +1788,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cert-manager module" + "description": "Set to override the tolerations that will be added to the pods of the package." } } }, @@ -1743,15 +1798,15 @@ "properties": { "disableAuth": { "type": "boolean", - "description": "If true, the ingress will not have authentication" + "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth." }, "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." 
} } } diff --git a/schemas/public/onpremises-kfd-v1alpha2.json b/schemas/public/onpremises-kfd-v1alpha2.json index 44af1db96..26c3f87fc 100644 --- a/schemas/public/onpremises-kfd-v1alpha2.json +++ b/schemas/public/onpremises-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "", + "description": "A KFD Cluster deployed on top of a set of existing VMs.", "type": "object", "properties": { "apiVersion": { @@ -49,7 +49,7 @@ "properties": { "distributionVersion": { "type": "string", - "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: v1.30.1.", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "kubernetes": { @@ -708,7 +708,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", @@ -726,7 +726,7 @@ }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`)." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." 
}, "networkPoliciesEnabled": { "type": "boolean", @@ -796,7 +796,7 @@ }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller package." + "description": "Configurations for the Ingress nginx controller package." }, "certManager": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", @@ -841,14 +841,14 @@ }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "Set to override the node selector used to place the pods of the Ingress module" + "description": "Set to override the node selector used to place the pods of the Ingress module." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "Set to override the tolerations that will be added to the pods of the Ingress module" + "description": "Set to override the tolerations that will be added to the pods of the Ingress module." } } }, @@ -881,7 +881,7 @@ "single", "dual" ], - "description": "The type of the nginx ingress controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type." 
+ "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type.\n\nDefault is `single`." }, "tls": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" @@ -974,7 +974,7 @@ "properties": { "name": { "type": "string", - "description": "Name of the clusterIssuer" + "description": "Name of the clusterIssuer." }, "email": { "type": "string", @@ -1026,7 +1026,7 @@ "loki", "customOutputs" ], - "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." + "description": "Selects the logging stack. 
Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." }, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -1124,7 +1124,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", - "description": "DEPRECATED in latest versions of KFD.", + "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.", "additionalProperties": false, "properties": { "overrides": { @@ -1285,7 +1285,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage."
+ "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1372,7 +1372,7 @@ "properties": { "deadManSwitchWebhookUrl": { "type": "string", - "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io" + "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io." }, "installDefaultRules": { "type": "boolean", @@ -1519,7 +1519,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment." + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -1677,7 +1677,7 @@ "gatekeeper", "kyverno" ], - "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`." 
+ "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." }, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -1801,7 +1801,7 @@ "none", "on-premises" ], - "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment." + "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment.\n\nDefault is `none`." }, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -2100,7 +2100,7 @@ "basicAuth", "sso" ], - "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication." + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." }, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -2357,11 +2357,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu request for the loki pods" + "description": "The CPU request for the Pod, in cores. Example: `500m`." }, "memory": { "type": "string", - "description": "The memory request for the prometheus pods" + "description": "The memory request for the Pod. Example: `500M`." 
} } }, @@ -2371,11 +2371,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu limit for the loki pods" + "description": "The CPU limit for the Pod. Example: `1000m`." }, "memory": { "type": "string", - "description": "The memory limit for the prometheus pods" + "description": "The memory limit for the Pod. Example: `1G`." } } } diff --git a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl index f823ad075..3dd175a5d 100644 --- a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl +++ b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl @@ -146,7 +146,7 @@ spec: # to: 80 # # Additional AWS tags # tags: {} - # aws-auth configmap definition, see https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html for more informations + # aws-auth configmap definition, see https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html for more information. awsAuth: {} # additionalAccounts: # - "777777777777" @@ -212,7 +212,7 @@ spec: # - http01: # ingress: # class: nginx - # DNS definition, used in conjunction with externalDNS package to automate DNS management and certificates emission + # DNS definition, used in conjunction with externalDNS package to automate DNS management and certificates emission. dns: # the public DNS zone definition public: