diff --git a/.github/workflows/feature-branch-image.yml b/.github/workflows/feature-branch-image.yml index 50e5f603492..21c4c30eb0e 100644 --- a/.github/workflows/feature-branch-image.yml +++ b/.github/workflows/feature-branch-image.yml @@ -1,42 +1,35 @@ --- name: Build and Push Feature Branch Custom Catalog env: - OPERATOR_IMG: quay.io/${{ secrets.QUAY_ORG }}/opendatahub-operator:${{ github.head_ref }} - BUNDLE_IMG: quay.io/${{ secrets.QUAY_ORG }}/opendatahub-operator-bundle:${{ github.head_ref }} + BUNDLE_IMG: quay.io/${{ secrets.QUAY_ORG }}/opendatahub-operator:${{ github.head_ref }} CATALOG_IMG: quay.io/${{ secrets.QUAY_ORG }}/opendatahub-operator-catalog:${{ github.head_ref }} on: - pull_request_target: + pull_request: branches: - - 'feature-**' - workflow_dispatch: + - "feature-**" permissions: - packages: write contents: read pull-requests: read - checks: write + checks: read jobs: + show-vars: + runs-on: ubuntu-latest + steps: + - name: GitHub Checks + uses: poseidon/wait-for-status-checks@v0.6.0 + with: + token: ${{ secrets.GITHUB_TOKEN }} + build-and-push-feature-branch-custom-catalog: + if: | + github.event_name == 'check_run' && github.event.check_run.conclusion == 'success' && + github.event.check_run.name == 'ci/prow/opendatahub-operator-pr-image-mirror' runs-on: ubuntu-latest timeout-minutes: 60 steps: - name: Login to quay.io run: | podman login -u ${{ secrets.QUAY_ID }} -p ${{ secrets.QUAY_TOKEN }} quay.io - - name: Build Feature Branch Image - run: | - make image-build -e IMG=${{ env.OPERATOR_IMG }} - - name: Push Feature Branch Image - run: | - make image-push -e IMG=${{ env.OPERATOR_IMG }} - - name: Build Bundle Manifests - run: | - make bundle - - name: Build Feature Branch Bundle - run: | - make bundle-build -e BUNDLE_IMG=${{ env.BUNDLE_IMG }} - - name: Push Feature Branch Bundle to quay.io - run: | - make bundle-push -e BUNDLE_IMG=${{ env.BUNDLE_IMG }} - name: Build Feature Branch Catalog run: | make catalog-build -e CATALOG_IMG=${{ 
env.CATALOG_IMG }} -e BUNDLE_IMG=${{ env.BUNDLE_IMG }} diff --git a/apis/components/v1/kserve_types.go b/apis/components/v1/kserve_types.go index cc5a842201a..16a309fbf73 100644 --- a/apis/components/v1/kserve_types.go +++ b/apis/components/v1/kserve_types.go @@ -17,20 +17,50 @@ limitations under the License. package v1 import ( - "github.com/opendatahub-io/opendatahub-operator/v2/apis/components" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/components" + infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/infrastructure/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +const ( + KserveComponentName = "kserve" + ModelControllerComponentName = "odh-model-controller" // shared by kserve and mm + // value should match what's set in the XValidation below + KserveInstanceName = "default-kserve" + KserveKind = "Kserve" +) + +// +kubebuilder:validation:Pattern=`^(Serverless|RawDeployment)$` +type DefaultDeploymentMode string + +const ( + // Serverless will be used as the default deployment mode for Kserve. This requires Serverless and ServiceMesh operators configured as dependencies. + Serverless DefaultDeploymentMode = "Serverless" + // RawDeployment will be used as the default deployment mode for Kserve. + RawDeployment DefaultDeploymentMode = "RawDeployment" +) + +// KserveCommonSpec spec defines the shared desired state of Kserve +type KserveCommonSpec struct { + components.DevFlagsSpec `json:",inline"` + // Serving configures the KNative-Serving stack used for model serving. A Service + // Mesh (Istio) is prerequisite, since it is used as networking layer. + Serving infrav1.ServingSpec `json:"serving,omitempty"` + // Configures the default deployment mode for Kserve. This can be set to 'Serverless' or 'RawDeployment'. + // The value specified in this field will be used to set the default deployment mode in the 'inferenceservice-config' configmap for Kserve. 
+ // This field is optional. If no default deployment mode is specified, Kserve will use Serverless mode. + // +kubebuilder:validation:Enum=Serverless;RawDeployment + DefaultDeploymentMode DefaultDeploymentMode `json:"defaultDeploymentMode,omitempty"` +} + // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. // KserveSpec defines the desired state of Kserve type KserveSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // Foo is an example field of Kserve. Edit kserve_types.go to remove/update - Foo string `json:"foo,omitempty"` + // kserve spec exposed to DSC api + KserveCommonSpec `json:",inline"` + // kserve spec exposed only to internal api } // KserveStatus defines the observed state of Kserve @@ -41,6 +71,9 @@ type KserveStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:resource:scope=Cluster +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'default-kserve'",message="Kserve name must be default-kserve" +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status`,description="Ready" +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,description="Reason" // Kserve is the Schema for the kserves API type Kserve struct { @@ -52,7 +85,7 @@ type Kserve struct { } func (c *Kserve) GetDevFlags() *components.DevFlags { - return nil + return c.Spec.DevFlags } func (c *Kserve) GetStatus() *components.Status { @@ -71,3 +104,11 @@ type KserveList struct { func init() { SchemeBuilder.Register(&Kserve{}, &KserveList{}) } + +// DSCKserve contains all the configuration exposed in DSC instance for Kserve component +type DSCKserve struct { + // configuration fields common across components + components.ManagementSpec `json:",inline"` + // Kserve 
specific fields + KserveCommonSpec `json:",inline"` +} diff --git a/apis/components/v1/zz_generated.deepcopy.go b/apis/components/v1/zz_generated.deepcopy.go index 8c4249bc2aa..dba5f0a7c6c 100644 --- a/apis/components/v1/zz_generated.deepcopy.go +++ b/apis/components/v1/zz_generated.deepcopy.go @@ -182,6 +182,23 @@ func (in *DSCDataSciencePipelines) DeepCopy() *DSCDataSciencePipelines { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCKserve) DeepCopyInto(out *DSCKserve) { + *out = *in + out.ManagementSpec = in.ManagementSpec + in.KserveCommonSpec.DeepCopyInto(&out.KserveCommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCKserve. +func (in *DSCKserve) DeepCopy() *DSCKserve { + if in == nil { + return nil + } + out := new(DSCKserve) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DSCKueue) DeepCopyInto(out *DSCKueue) { *out = *in @@ -518,7 +535,7 @@ func (in *Kserve) DeepCopyInto(out *Kserve) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } @@ -540,6 +557,23 @@ func (in *Kserve) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KserveCommonSpec) DeepCopyInto(out *KserveCommonSpec) { + *out = *in + in.DevFlagsSpec.DeepCopyInto(&out.DevFlagsSpec) + out.Serving = in.Serving +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KserveCommonSpec. 
+func (in *KserveCommonSpec) DeepCopy() *KserveCommonSpec { + if in == nil { + return nil + } + out := new(KserveCommonSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KserveList) DeepCopyInto(out *KserveList) { *out = *in @@ -575,6 +609,7 @@ func (in *KserveList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KserveSpec) DeepCopyInto(out *KserveSpec) { *out = *in + in.KserveCommonSpec.DeepCopyInto(&out.KserveCommonSpec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KserveSpec. diff --git a/apis/datasciencecluster/v1/datasciencecluster_types.go b/apis/datasciencecluster/v1/datasciencecluster_types.go index 11bd7b91aac..c90631a18df 100644 --- a/apis/datasciencecluster/v1/datasciencecluster_types.go +++ b/apis/datasciencecluster/v1/datasciencecluster_types.go @@ -26,7 +26,6 @@ import ( componentsv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1" "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/components/kserve" "github.com/opendatahub-io/opendatahub-operator/v2/components/modelmeshserving" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" ) @@ -56,7 +55,7 @@ type Components struct { // Kserve component configuration. // Require OpenShift Serverless and OpenShift Service Mesh Operators to be installed before enable component // Does not support enabled ModelMeshServing at the same time - Kserve kserve.Kserve `json:"kserve,omitempty"` + Kserve componentsv1.DSCKserve `json:"kserve,omitempty"` // Kueue component configuration. 
Kueue componentsv1.DSCKueue `json:"kueue,omitempty"` diff --git a/bundle/manifests/components.opendatahub.io_codeflares.yaml b/bundle/manifests/components.opendatahub.io_codeflares.yaml index 2315e8ffe97..e17f5580582 100644 --- a/bundle/manifests/components.opendatahub.io_codeflares.yaml +++ b/bundle/manifests/components.opendatahub.io_codeflares.yaml @@ -14,7 +14,16 @@ spec: singular: codeflare scope: Cluster versions: - - name: v1 + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1 schema: openAPIV3Schema: description: CodeFlare is the Schema for the codeflares API @@ -37,12 +46,33 @@ spec: metadata: type: object spec: - description: CodeFlareSpec defines the desired state of CodeFlare properties: - foo: - description: Foo is an example field of CodeFlare. Edit codeflare_types.go - to remove/update - type: string + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. 
https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object type: object status: description: CodeFlareStatus defines the observed state of CodeFlare @@ -110,6 +140,9 @@ spec: type: string type: object type: object + x-kubernetes-validations: + - message: CodeFlare name must be default-codeflare + rule: self.metadata.name == 'default-codeflare' served: true storage: true subresources: diff --git a/bundle/manifests/components.opendatahub.io_kserves.yaml b/bundle/manifests/components.opendatahub.io_kserves.yaml index b9795aa7a40..eeba3e410d1 100644 --- a/bundle/manifests/components.opendatahub.io_kserves.yaml +++ b/bundle/manifests/components.opendatahub.io_kserves.yaml @@ -14,7 +14,16 @@ spec: singular: kserve scope: Cluster versions: - - name: v1 + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1 schema: openAPIV3Schema: description: Kserve is the Schema for the kserves API @@ -39,10 +48,100 @@ spec: spec: description: KserveSpec defines the desired state of Kserve properties: - foo: - description: Foo is an example field of Kserve. Edit kserve_types.go - to remove/update + defaultDeploymentMode: + description: |- + Configures the default deployment mode for Kserve. This can be set to 'Serverless' or 'RawDeployment'. + The value specified in this field will be used to set the default deployment mode in the 'inferenceservice-config' configmap for Kserve. + This field is optional. If no default deployment mode is specified, Kserve will use Serverless mode. 
+ enum: + - Serverless + - RawDeployment + pattern: ^(Serverless|RawDeployment)$ type: string + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + serving: + description: |- + Serving configures the KNative-Serving stack used for model serving. A Service + Mesh (Istio) is prerequisite, since it is used as networking layer. + properties: + ingressGateway: + description: |- + IngressGateway allows to customize some parameters for the Istio Ingress Gateway + that is bound to KNative-Serving. + properties: + certificate: + description: |- + Certificate specifies configuration of the TLS certificate securing communication + for the gateway. + properties: + secretName: + description: |- + SecretName specifies the name of the Kubernetes Secret resource that contains a + TLS certificate secure HTTP communications for the KNative network. + type: string + type: + default: OpenshiftDefaultIngress + description: |- + Type specifies if the TLS certificate should be generated automatically, or if the certificate + is provided by the user. Allowed values are: + * SelfSigned: A certificate is going to be generated using an own private key. + * Provided: Pre-existence of the TLS Secret (see SecretName) with a valid certificate is assumed. 
+ * OpenshiftDefaultIngress: Default ingress certificate configured for OpenShift + enum: + - SelfSigned + - Provided + - OpenshiftDefaultIngress + type: string + type: object + domain: + description: |- + Domain specifies the host name for intercepting incoming requests. + Most likely, you will want to use a wildcard name, like *.example.com. + If not set, the domain of the OpenShift Ingress is used. + If you choose to generate a certificate, this is the domain used for the certificate request. + type: string + type: object + managementState: + default: Managed + enum: + - Managed + - Unmanaged + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + name: + default: knative-serving + description: |- + Name specifies the name of the KNativeServing resource that is going to be + created to instruct the KNative Operator to deploy KNative serving components. + This resource is created in the "knative-serving" namespace. + type: string + type: object type: object status: description: KserveStatus defines the observed state of Kserve @@ -110,6 +209,9 @@ spec: type: string type: object type: object + x-kubernetes-validations: + - message: Kserve name must be default-kserve + rule: self.metadata.name == 'default-kserve' served: true storage: true subresources: diff --git a/components/kserve/kserve.go b/components/kserve/kserve.go deleted file mode 100644 index 85b739285ea..00000000000 --- a/components/kserve/kserve.go +++ /dev/null @@ -1,190 +0,0 @@ -// Package kserve provides utility functions to config Kserve as the Controller for serving ML models on arbitrary frameworks -// +groupName=datasciencecluster.opendatahub.io -package kserve - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - "github.com/go-logr/logr" - operatorv1 "github.com/openshift/api/operator/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/log" - - dsciv1 
"github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/infrastructure/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" -) - -var ( - ComponentName = "kserve" - Path = deploy.DefaultManifestPath + "/" + ComponentName + "/overlays/odh" - DependentComponentName = "odh-model-controller" - DependentPath = deploy.DefaultManifestPath + "/" + DependentComponentName + "/base" - ServiceMeshOperator = "servicemeshoperator" - ServerlessOperator = "serverless-operator" -) - -// Verifies that Kserve implements ComponentInterface. -var _ components.ComponentInterface = (*Kserve)(nil) - -// +kubebuilder:validation:Pattern=`^(Serverless|RawDeployment)$` -type DefaultDeploymentMode string - -var ( - // Serverless will be used as the default deployment mode for Kserve. This requires Serverless and ServiceMesh operators configured as dependencies. - Serverless DefaultDeploymentMode = "Serverless" - // RawDeployment will be used as the default deployment mode for Kserve. - RawDeployment DefaultDeploymentMode = "RawDeployment" -) - -// Kserve struct holds the configuration for the Kserve component. -// +kubebuilder:object:generate=true -type Kserve struct { - components.Component `json:""` - // Serving configures the KNative-Serving stack used for model serving. A Service - // Mesh (Istio) is prerequisite, since it is used as networking layer. - Serving infrav1.ServingSpec `json:"serving,omitempty"` - // Configures the default deployment mode for Kserve. This can be set to 'Serverless' or 'RawDeployment'. - // The value specified in this field will be used to set the default deployment mode in the 'inferenceservice-config' configmap for Kserve. - // This field is optional. If no default deployment mode is specified, Kserve will use Serverless mode. 
- // +kubebuilder:validation:Enum=Serverless;RawDeployment - DefaultDeploymentMode DefaultDeploymentMode `json:"defaultDeploymentMode,omitempty"` -} - -func (k *Kserve) Init(ctx context.Context, _ cluster.Platform) error { - log := logf.FromContext(ctx).WithName(ComponentName) - - // dependentParamMap for odh-model-controller to use. - var dependentParamMap = map[string]string{ - "odh-model-controller": "RELATED_IMAGE_ODH_MODEL_CONTROLLER_IMAGE", - } - - // Update image parameters for odh-model-controller - if err := deploy.ApplyParams(DependentPath, dependentParamMap); err != nil { - log.Error(err, "failed to update image", "path", DependentPath) - } - - return nil -} - -func (k *Kserve) OverrideManifests(ctx context.Context, _ cluster.Platform) error { - // Download manifests if defined by devflags - // Go through each manifest and set the overlays if defined - for _, subcomponent := range k.DevFlags.Manifests { - if strings.Contains(subcomponent.URI, DependentComponentName) { - // Download subcomponent - if err := deploy.DownloadManifests(ctx, DependentComponentName, subcomponent); err != nil { - return err - } - // If overlay is defined, update paths - defaultKustomizePath := "base" - if subcomponent.SourcePath != "" { - defaultKustomizePath = subcomponent.SourcePath - } - DependentPath = filepath.Join(deploy.DefaultManifestPath, DependentComponentName, defaultKustomizePath) - } - - if strings.Contains(subcomponent.URI, ComponentName) { - // Download subcomponent - if err := deploy.DownloadManifests(ctx, ComponentName, subcomponent); err != nil { - return err - } - // If overlay is defined, update paths - defaultKustomizePath := "overlays/odh" - if subcomponent.SourcePath != "" { - defaultKustomizePath = subcomponent.SourcePath - } - Path = filepath.Join(deploy.DefaultManifestPath, ComponentName, defaultKustomizePath) - } - } - - return nil -} - -func (k *Kserve) GetComponentName() string { - return ComponentName -} - -func (k *Kserve) ReconcileComponent(ctx 
context.Context, cli client.Client, - l logr.Logger, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { - enabled := k.GetManagementState() == operatorv1.Managed - monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - - if !enabled { - if err := k.removeServerlessFeatures(ctx, cli, owner, dscispec); err != nil { - return err - } - } else { - // Configure dependencies - if err := k.configureServerless(ctx, cli, l, owner, dscispec); err != nil { - return err - } - if k.DevFlags != nil { - // Download manifests and update paths - if err := k.OverrideManifests(ctx, platform); err != nil { - return err - } - } - } - - if err := k.configureServiceMesh(ctx, cli, owner, dscispec); err != nil { - return fmt.Errorf("failed configuring service mesh while reconciling kserve component. cause: %w", err) - } - - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, Path, dscispec.ApplicationsNamespace, ComponentName, enabled); err != nil { - return fmt.Errorf("failed to apply manifests from %s : %w", Path, err) - } - - l.WithValues("Path", Path).Info("apply manifests done for kserve") - - if enabled { - if err := k.setupKserveConfig(ctx, cli, l, dscispec); err != nil { - return err - } - - // For odh-model-controller - if err := cluster.UpdatePodSecurityRolebinding(ctx, cli, dscispec.ApplicationsNamespace, "odh-model-controller"); err != nil { - return err - } - } - - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, DependentPath, dscispec.ApplicationsNamespace, ComponentName, enabled); err != nil { - if !strings.Contains(err.Error(), "spec.selector") || !strings.Contains(err.Error(), "field is immutable") { - // explicitly ignore error if error contains keywords "spec.selector" and "field is immutable" and return all other error. 
- return err - } - } - l.WithValues("Path", Path).Info("apply manifests done for odh-model-controller") - - // Wait for deployment available - if enabled { - if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 3); err != nil { - return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) - } - } - - // CloudService Monitoring handling - if platform == cluster.ManagedRhods { - // kesrve rules - if err := k.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { - return err - } - l.Info("updating SRE monitoring done") - } - - return nil -} - -func (k *Kserve) Cleanup(ctx context.Context, cli client.Client, owner metav1.Object, instance *dsciv1.DSCInitializationSpec) error { - if removeServerlessErr := k.removeServerlessFeatures(ctx, cli, owner, instance); removeServerlessErr != nil { - return removeServerlessErr - } - - return k.removeServiceMeshConfigurations(ctx, cli, owner, instance) -} diff --git a/components/kserve/kserve_config_handler.go b/components/kserve/kserve_config_handler.go deleted file mode 100644 index e43e7313b2f..00000000000 --- a/components/kserve/kserve_config_handler.go +++ /dev/null @@ -1,185 +0,0 @@ -package kserve - -import ( - "context" - "encoding/json" - "errors" - "fmt" - - "github.com/go-logr/logr" - "github.com/hashicorp/go-multierror" - operatorv1 "github.com/openshift/api/operator/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" -) - -const ( - KserveConfigMapName string = "inferenceservice-config" -) - -func (k *Kserve) setupKserveConfig(ctx 
context.Context, cli client.Client, logger logr.Logger, dscispec *dsciv1.DSCInitializationSpec) error { - // as long as Kserve.Serving is not 'Removed', we will setup the dependencies - - switch k.Serving.ManagementState { - case operatorv1.Managed, operatorv1.Unmanaged: - if k.DefaultDeploymentMode == "" { - // if the default mode is empty in the DSC, assume mode is "Serverless" since k.Serving is Managed - if err := k.setDefaultDeploymentMode(ctx, cli, dscispec, Serverless); err != nil { - return err - } - } else { - // if the default mode is explicitly specified, respect that - if err := k.setDefaultDeploymentMode(ctx, cli, dscispec, k.DefaultDeploymentMode); err != nil { - return err - } - } - case operatorv1.Removed: - if k.DefaultDeploymentMode == Serverless { - return errors.New("setting defaultdeployment mode as Serverless is incompatible with having Serving 'Removed'") - } - if k.DefaultDeploymentMode == "" { - logger.Info("Serving is removed, Kserve will default to rawdeployment") - } - if err := k.setDefaultDeploymentMode(ctx, cli, dscispec, RawDeployment); err != nil { - return err - } - } - return nil -} - -func (k *Kserve) setDefaultDeploymentMode(ctx context.Context, cli client.Client, dscispec *dsciv1.DSCInitializationSpec, defaultmode DefaultDeploymentMode) error { - inferenceServiceConfigMap := &corev1.ConfigMap{} - err := cli.Get(ctx, client.ObjectKey{ - Namespace: dscispec.ApplicationsNamespace, - Name: KserveConfigMapName, - }, inferenceServiceConfigMap) - if err != nil { - return fmt.Errorf("error getting configmap %v: %w", KserveConfigMapName, err) - } - - // set data.deploy.defaultDeploymentMode to the model specified in the Kserve spec - var deployData map[string]interface{} - if err = json.Unmarshal([]byte(inferenceServiceConfigMap.Data["deploy"]), &deployData); err != nil { - return fmt.Errorf("error retrieving value for key 'deploy' from configmap %s. 
%w", KserveConfigMapName, err) - } - modeFound := deployData["defaultDeploymentMode"] - if modeFound != string(defaultmode) { - deployData["defaultDeploymentMode"] = defaultmode - deployDataBytes, err := json.MarshalIndent(deployData, "", " ") - if err != nil { - return fmt.Errorf("could not set values in configmap %s. %w", KserveConfigMapName, err) - } - inferenceServiceConfigMap.Data["deploy"] = string(deployDataBytes) - - var ingressData map[string]interface{} - if err = json.Unmarshal([]byte(inferenceServiceConfigMap.Data["ingress"]), &ingressData); err != nil { - return fmt.Errorf("error retrieving value for key 'ingress' from configmap %s. %w", KserveConfigMapName, err) - } - if defaultmode == RawDeployment { - ingressData["disableIngressCreation"] = true - } else { - ingressData["disableIngressCreation"] = false - } - ingressDataBytes, err := json.MarshalIndent(ingressData, "", " ") - if err != nil { - return fmt.Errorf("could not set values in configmap %s. %w", KserveConfigMapName, err) - } - inferenceServiceConfigMap.Data["ingress"] = string(ingressDataBytes) - - if err = cli.Update(ctx, inferenceServiceConfigMap); err != nil { - return fmt.Errorf("could not set default deployment mode for Kserve. 
%w", err) - } - - // Restart the pod if configmap is updated so that kserve boots with the correct value - podList := &corev1.PodList{} - listOpts := []client.ListOption{ - client.InNamespace(dscispec.ApplicationsNamespace), - client.MatchingLabels{ - labels.ODH.Component(ComponentName): "true", - "control-plane": "kserve-controller-manager", - }, - } - if err := cli.List(ctx, podList, listOpts...); err != nil { - return fmt.Errorf("failed to list pods: %w", err) - } - for _, pod := range podList.Items { - pod := pod - if err := cli.Delete(ctx, &pod); err != nil { - return fmt.Errorf("failed to delete pod %s: %w", pod.Name, err) - } - } - } - - return nil -} - -func (k *Kserve) configureServerless(ctx context.Context, cli client.Client, logger logr.Logger, owner metav1.Object, instance *dsciv1.DSCInitializationSpec) error { - switch k.Serving.ManagementState { - case operatorv1.Unmanaged: // Bring your own CR - logger.Info("Serverless CR is not configured by the operator, we won't do anything") - - case operatorv1.Removed: // we remove serving CR - logger.Info("existing Serverless CR (owned by operator) will be removed") - if err := k.removeServerlessFeatures(ctx, cli, owner, instance); err != nil { - return err - } - - case operatorv1.Managed: // standard workflow to create CR - if instance.ServiceMesh == nil { - return errors.New("ServiceMesh needs to be configured and 'Managed' in DSCI CR, " + - "it is required by KServe serving") - } - - switch instance.ServiceMesh.ManagementState { - case operatorv1.Unmanaged, operatorv1.Removed: - return fmt.Errorf("ServiceMesh is currently set to '%s'. 
It needs to be set to 'Managed' in DSCI CR, "+ - "as it is required by the KServe serving field", instance.ServiceMesh.ManagementState) - } - - // check on dependent operators if all installed in cluster - dependOpsErrors := checkDependentOperators(ctx, cli).ErrorOrNil() - if dependOpsErrors != nil { - return dependOpsErrors - } - - serverlessFeatures := feature.ComponentFeaturesHandler(owner, k.GetComponentName(), instance.ApplicationsNamespace, k.configureServerlessFeatures(instance)) - - if err := serverlessFeatures.Apply(ctx, cli); err != nil { - return err - } - } - return nil -} - -func (k *Kserve) removeServerlessFeatures(ctx context.Context, cli client.Client, owner metav1.Object, instance *dsciv1.DSCInitializationSpec) error { - serverlessFeatures := feature.ComponentFeaturesHandler(owner, k.GetComponentName(), instance.ApplicationsNamespace, k.configureServerlessFeatures(instance)) - - return serverlessFeatures.Delete(ctx, cli) -} - -func checkDependentOperators(ctx context.Context, cli client.Client) *multierror.Error { - var multiErr *multierror.Error - - if found, err := cluster.OperatorExists(ctx, cli, ServiceMeshOperator); err != nil { - multiErr = multierror.Append(multiErr, err) - } else if !found { - err = fmt.Errorf("operator %s not found. Please install the operator before enabling %s component", - ServiceMeshOperator, ComponentName) - multiErr = multierror.Append(multiErr, err) - } - - if found, err := cluster.OperatorExists(ctx, cli, ServerlessOperator); err != nil { - multiErr = multierror.Append(multiErr, err) - } else if !found { - err = fmt.Errorf("operator %s not found. 
Please install the operator before enabling %s component", - ServerlessOperator, ComponentName) - multiErr = multierror.Append(multiErr, err) - } - return multiErr -} diff --git a/components/kserve/resources/servicemesh/z-migrations/kserve-predictor-authorizationpolicy.patch.tmpl.yaml b/components/kserve/resources/servicemesh/z-migrations/kserve-predictor-authorizationpolicy.patch.tmpl.yaml deleted file mode 100644 index 0d141dd8d17..00000000000 --- a/components/kserve/resources/servicemesh/z-migrations/kserve-predictor-authorizationpolicy.patch.tmpl.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: security.istio.io/v1beta1 -kind: AuthorizationPolicy -metadata: - name: kserve-predictor - namespace: {{ .ControlPlane.Namespace }} -spec: - provider: - name: {{ .AuthExtensionName }} diff --git a/components/kserve/serverless_setup.go b/components/kserve/serverless_setup.go deleted file mode 100644 index ee3766ebe23..00000000000 --- a/components/kserve/serverless_setup.go +++ /dev/null @@ -1,72 +0,0 @@ -package kserve - -import ( - "path" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/manifest" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/serverless" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" -) - -func (k *Kserve) configureServerlessFeatures(dsciSpec *dsciv1.DSCInitializationSpec) feature.FeaturesProvider { - return func(registry feature.FeaturesRegistry) error { - servingDeployment := feature.Define("serverless-serving-deployment"). - Manifests( - manifest.Location(Resources.Location). - Include( - path.Join(Resources.InstallDir), - ), - ). 
- WithData( - serverless.FeatureData.IngressDomain.Define(&k.Serving).AsAction(), - serverless.FeatureData.Serving.Define(&k.Serving).AsAction(), - servicemesh.FeatureData.ControlPlane.Define(dsciSpec).AsAction(), - ). - PreConditions( - serverless.EnsureServerlessOperatorInstalled, - serverless.EnsureServerlessAbsent, - servicemesh.EnsureServiceMeshInstalled, - feature.CreateNamespaceIfNotExists(serverless.KnativeServingNamespace), - ). - PostConditions( - feature.WaitForPodsToBeReady(serverless.KnativeServingNamespace), - ) - - istioSecretFiltering := feature.Define("serverless-net-istio-secret-filtering"). - Manifests( - manifest.Location(Resources.Location). - Include( - path.Join(Resources.BaseDir, "serving-net-istio-secret-filtering.patch.tmpl.yaml"), - ), - ). - WithData(serverless.FeatureData.Serving.Define(&k.Serving).AsAction()). - PreConditions(serverless.EnsureServerlessServingDeployed). - PostConditions( - feature.WaitForPodsToBeReady(serverless.KnativeServingNamespace), - ) - - servingGateway := feature.Define("serverless-serving-gateways"). - Manifests( - manifest.Location(Resources.Location). - Include( - path.Join(Resources.GatewaysDir), - ), - ). - WithData( - serverless.FeatureData.IngressDomain.Define(&k.Serving).AsAction(), - serverless.FeatureData.CertificateName.Define(&k.Serving).AsAction(), - serverless.FeatureData.Serving.Define(&k.Serving).AsAction(), - servicemesh.FeatureData.ControlPlane.Define(dsciSpec).AsAction(), - ). - WithResources(serverless.ServingCertificateResource). 
- PreConditions(serverless.EnsureServerlessServingDeployed) - - return registry.Add( - servingDeployment, - istioSecretFiltering, - servingGateway, - ) - } -} diff --git a/components/kserve/servicemesh_setup.go b/components/kserve/servicemesh_setup.go deleted file mode 100644 index 126e23d88ea..00000000000 --- a/components/kserve/servicemesh_setup.go +++ /dev/null @@ -1,76 +0,0 @@ -package kserve - -import ( - "context" - "fmt" - "path" - - operatorv1 "github.com/openshift/api/operator/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/manifest" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" -) - -func (k *Kserve) configureServiceMesh(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec) error { - if dscispec.ServiceMesh != nil { - if dscispec.ServiceMesh.ManagementState == operatorv1.Managed && k.GetManagementState() == operatorv1.Managed { - serviceMeshInitializer := feature.ComponentFeaturesHandler(owner, k.GetComponentName(), dscispec.ApplicationsNamespace, k.defineServiceMeshFeatures(ctx, cli, dscispec)) - return serviceMeshInitializer.Apply(ctx, cli) - } - if dscispec.ServiceMesh.ManagementState == operatorv1.Unmanaged && k.GetManagementState() == operatorv1.Managed { - return nil - } - } - - return k.removeServiceMeshConfigurations(ctx, cli, owner, dscispec) -} - -func (k *Kserve) removeServiceMeshConfigurations(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec) error { - serviceMeshInitializer := feature.ComponentFeaturesHandler(owner, k.GetComponentName(), 
dscispec.ApplicationsNamespace, k.defineServiceMeshFeatures(ctx, cli, dscispec)) - return serviceMeshInitializer.Delete(ctx, cli) -} - -func (k *Kserve) defineServiceMeshFeatures(ctx context.Context, cli client.Client, dscispec *dsciv1.DSCInitializationSpec) feature.FeaturesProvider { - return func(registry feature.FeaturesRegistry) error { - authorinoInstalled, err := cluster.SubscriptionExists(ctx, cli, "authorino-operator") - if err != nil { - return fmt.Errorf("failed to list subscriptions %w", err) - } - - if authorinoInstalled { - kserveExtAuthzErr := registry.Add(feature.Define("kserve-external-authz"). - Manifests( - manifest.Location(Resources.Location). - Include( - path.Join(Resources.ServiceMeshDir, "activator-envoyfilter.tmpl.yaml"), - path.Join(Resources.ServiceMeshDir, "envoy-oauth-temp-fix.tmpl.yaml"), - path.Join(Resources.ServiceMeshDir, "kserve-predictor-authorizationpolicy.tmpl.yaml"), - path.Join(Resources.ServiceMeshDir, "z-migrations"), - ), - ). - Managed(). - WithData( - feature.Entry("Domain", cluster.GetDomain), - servicemesh.FeatureData.ControlPlane.Define(dscispec).AsAction(), - ). - WithData( - servicemesh.FeatureData.Authorization.All(dscispec)..., - ), - ) - - if kserveExtAuthzErr != nil { - return kserveExtAuthzErr - } - } else { - ctrl.Log.Info("WARN: Authorino operator is not installed on the cluster, skipping authorization capability") - } - - return nil - } -} diff --git a/components/kserve/zz_generated.deepcopy.go b/components/kserve/zz_generated.deepcopy.go deleted file mode 100644 index da6e99960b7..00000000000 --- a/components/kserve/zz_generated.deepcopy.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build !ignore_autogenerated - -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package kserve - -import () - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Kserve) DeepCopyInto(out *Kserve) { - *out = *in - in.Component.DeepCopyInto(&out.Component) - out.Serving = in.Serving -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kserve. -func (in *Kserve) DeepCopy() *Kserve { - if in == nil { - return nil - } - out := new(Kserve) - in.DeepCopyInto(out) - return out -} diff --git a/config/crd/bases/components.opendatahub.io_kserves.yaml b/config/crd/bases/components.opendatahub.io_kserves.yaml index e4772479834..aae5b95784c 100644 --- a/config/crd/bases/components.opendatahub.io_kserves.yaml +++ b/config/crd/bases/components.opendatahub.io_kserves.yaml @@ -14,7 +14,16 @@ spec: singular: kserve scope: Cluster versions: - - name: v1 + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1 schema: openAPIV3Schema: description: Kserve is the Schema for the kserves API @@ -39,10 +48,100 @@ spec: spec: description: KserveSpec defines the desired state of Kserve properties: - foo: - description: Foo is an example field of Kserve. Edit kserve_types.go - to remove/update + defaultDeploymentMode: + description: |- + Configures the default deployment mode for Kserve. 
This can be set to 'Serverless' or 'RawDeployment'. + The value specified in this field will be used to set the default deployment mode in the 'inferenceservice-config' configmap for Kserve. + This field is optional. If no default deployment mode is specified, Kserve will use Serverless mode. + enum: + - Serverless + - RawDeployment + pattern: ^(Serverless|RawDeployment)$ type: string + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + serving: + description: |- + Serving configures the KNative-Serving stack used for model serving. A Service + Mesh (Istio) is prerequisite, since it is used as networking layer. + properties: + ingressGateway: + description: |- + IngressGateway allows to customize some parameters for the Istio Ingress Gateway + that is bound to KNative-Serving. + properties: + certificate: + description: |- + Certificate specifies configuration of the TLS certificate securing communication + for the gateway. + properties: + secretName: + description: |- + SecretName specifies the name of the Kubernetes Secret resource that contains a + TLS certificate secure HTTP communications for the KNative network. 
+ type: string + type: + default: OpenshiftDefaultIngress + description: |- + Type specifies if the TLS certificate should be generated automatically, or if the certificate + is provided by the user. Allowed values are: + * SelfSigned: A certificate is going to be generated using an own private key. + * Provided: Pre-existence of the TLS Secret (see SecretName) with a valid certificate is assumed. + * OpenshiftDefaultIngress: Default ingress certificate configured for OpenShift + enum: + - SelfSigned + - Provided + - OpenshiftDefaultIngress + type: string + type: object + domain: + description: |- + Domain specifies the host name for intercepting incoming requests. + Most likely, you will want to use a wildcard name, like *.example.com. + If not set, the domain of the OpenShift Ingress is used. + If you choose to generate a certificate, this is the domain used for the certificate request. + type: string + type: object + managementState: + default: Managed + enum: + - Managed + - Unmanaged + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + name: + default: knative-serving + description: |- + Name specifies the name of the KNativeServing resource that is going to be + created to instruct the KNative Operator to deploy KNative serving components. + This resource is created in the "knative-serving" namespace. 
+ type: string + type: object type: object status: description: KserveStatus defines the observed state of Kserve @@ -110,6 +209,9 @@ spec: type: string type: object type: object + x-kubernetes-validations: + - message: Kserve name must be default-kserve + rule: self.metadata.name == 'default-kserve' served: true storage: true subresources: diff --git a/controllers/components/dashboard/dashboard_controller_actions.go b/controllers/components/dashboard/dashboard_controller_actions.go index 0458803ee0a..b6c31de504f 100644 --- a/controllers/components/dashboard/dashboard_controller_actions.go +++ b/controllers/components/dashboard/dashboard_controller_actions.go @@ -79,7 +79,7 @@ func configureDependencies(_ context.Context, rr *odhtypes.ReconciliationRequest return nil } - err := rr.AddResource(&corev1.Secret{ + err := rr.AddResources(&corev1.Secret{ TypeMeta: metav1.TypeMeta{ APIVersion: corev1.SchemeGroupVersion.String(), Kind: "Secret", diff --git a/components/kserve/feature_resources.go b/controllers/components/kserve/feature_resources.go similarity index 100% rename from components/kserve/feature_resources.go rename to controllers/components/kserve/feature_resources.go diff --git a/controllers/components/kserve/kserve.go b/controllers/components/kserve/kserve.go new file mode 100644 index 00000000000..97156189d42 --- /dev/null +++ b/controllers/components/kserve/kserve.go @@ -0,0 +1,83 @@ +package kserve + +import ( + "fmt" + + operatorv1 "github.com/openshift/api/operator/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + componentsv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + cr "github.com/opendatahub-io/opendatahub-operator/v2/pkg/componentsregistry" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" +) + +const ( + componentName = componentsv1.KserveComponentName + odhModelControllerComponentName = componentsv1.ModelControllerComponentName + + serviceMeshOperator = "servicemeshoperator" + serverlessOperator = "serverless-operator" + + kserveConfigMapName = "inferenceservice-config" + + kserveManifestSourcePath = "overlays/odh" + odhModelControllerManifestSourcePath = "base" +) + +type componentHandler struct{} + +func init() { //nolint:gochecknoinits + cr.Add(&componentHandler{}) +} + +func (s *componentHandler) GetName() string { + return componentName +} + +func (s *componentHandler) GetManagementState(dsc *dscv1.DataScienceCluster) operatorv1.ManagementState { + if dsc.Spec.Components.Kserve.ManagementState == operatorv1.Managed { + return operatorv1.Managed + } + return operatorv1.Removed +} + +// Init sets the images. +func (s *componentHandler) Init(platform cluster.Platform) error { + omcManifestInfo := odhModelControllerManifestInfo(odhModelControllerManifestSourcePath) + + // dependentParamMap for odh-model-controller to use. + var dependentParamMap = map[string]string{ + odhModelControllerComponentName: "RELATED_IMAGE_ODH_MODEL_CONTROLLER_IMAGE", + } + + // Update image parameters for odh-model-controller + if err := deploy.ApplyParams(omcManifestInfo.String(), dependentParamMap); err != nil { + return fmt.Errorf("failed to update images on path %s: %w", omcManifestInfo.String(), err) + } + + return nil +} + +// for DSC to get component Kserve's CR. 
+func (s *componentHandler) NewCRObject(dsc *dscv1.DataScienceCluster) client.Object { + kserveAnnotations := make(map[string]string) + kserveAnnotations[annotations.ManagementStateAnnotation] = string(s.GetManagementState(dsc)) + + return client.Object(&componentsv1.Kserve{ + TypeMeta: metav1.TypeMeta{ + Kind: componentsv1.KserveKind, + APIVersion: componentsv1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: componentsv1.KserveInstanceName, + Annotations: kserveAnnotations, + }, + Spec: componentsv1.KserveSpec{ + KserveCommonSpec: dsc.Spec.Components.Kserve.KserveCommonSpec, + }, + }) +} diff --git a/controllers/components/kserve/kserve_controller.go b/controllers/components/kserve/kserve_controller.go index 9387e2d15d7..6b8b2cf0dfd 100644 --- a/controllers/components/kserve/kserve_controller.go +++ b/controllers/components/kserve/kserve_controller.go @@ -19,40 +19,142 @@ package kserve import ( "context" - "k8s.io/apimachinery/pkg/runtime" + templatev1 "github.com/openshift/api/template/v1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/handler" componentsv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1" + featuresv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/gc" + 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render/kustomize" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/security" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/updatestatus" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/clusterrole" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/hash" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/reconciler" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" ) -// KserveReconciler reconciles a Kserve object. -type KserveReconciler struct { - client.Client - Scheme *runtime.Scheme -} +// NewComponentReconciler creates a ComponentReconciler for the Kserve API. +func (s *componentHandler) NewComponentReconciler(ctx context.Context, mgr ctrl.Manager) error { + ownedViaFTMapFunc := ownedViaFT(mgr.GetClient()) -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the Kserve object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.12.2/pkg/reconcile -func (r *KserveReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - _ = log.FromContext(ctx) - - // TODO(user): your logic here - - return ctrl.Result{}, nil -} + _, err := reconciler.ComponentReconcilerFor(mgr, &componentsv1.Kserve{}). + // operands - owned + Owns(&corev1.Secret{}). + Owns(&corev1.Service{}). + Owns(&corev1.ConfigMap{}). + Owns(&corev1.ServiceAccount{}). 
+ Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). + Owns(&rbacv1.ClusterRole{}, reconciler.WithPredicates(clusterrole.IgnoreIfAggregationRule())). + Owns(&rbacv1.ClusterRoleBinding{}). + // The ovms template gets a new resourceVersion periodically without any other + // changes. The compareHashPredicate ensures that we don't needlessly enqueue + // requests if there are no changes that we don't care about. + Owns(&templatev1.Template{}, reconciler.WithPredicates(hash.Updated())). + // The FeatureTrackers are created slightly differently, and have + // ownerRefs set by controllerutil.SetOwnerReference() rather than + // controllerutil.SetControllerReference(), which means that the default + // eventHandler for Owns won't work, so a slightly modified variant is + // added here + Owns(&featuresv1.FeatureTracker{}, reconciler.WithEventHandler( + handler.EnqueueRequestForOwner( + mgr.GetScheme(), + mgr.GetRESTMapper(), + &componentsv1.Kserve{}, + ))). + Owns(&networkingv1.NetworkPolicy{}). + Owns(&monitoringv1.ServiceMonitor{}). + Owns(&admissionregistrationv1.MutatingWebhookConfiguration{}). + Owns(&admissionregistrationv1.ValidatingWebhookConfiguration{}). + Owns(&appsv1.Deployment{}, reconciler.WithPredicates(resources.NewDeploymentPredicate())). + // operands - watched + // + // By default the Watches functions adds: + // - an event handler mapping to a cluster scope resource identified by the + // components.opendatahub.io/managed-by annotation + // - a predicate that check for generation change for Delete/Updates events + // for to objects that have the label components.opendatahub.io/managed-by + // set to the current owner + // + Watches(&extv1.CustomResourceDefinition{}). + + // operands - dynamically watched + // + // A watch will be created dynamically for these kinds, if they exist on the cluster + // (they come from ServiceMesh and Serverless operators). 
+ // + // They're owned by FeatureTrackers, which are owned by a Kserve; so there's a + // custom event mapper to enqueue a reconcile request for a Kserve object, if + // applicable. + // + // They also don't have the "partOf" label that Watches expects in the + // implicit predicate, so the simpler "DefaultPredicate" is also added. + WatchesGVK( + gvk.KnativeServing, + reconciler.Dynamic(), + reconciler.WithEventMapper(ownedViaFTMapFunc), + reconciler.WithPredicates(reconciler.DefaultPredicate)). + WatchesGVK( + gvk.ServiceMeshMember, + reconciler.Dynamic(), + reconciler.WithEventMapper(ownedViaFTMapFunc), + reconciler.WithPredicates(reconciler.DefaultPredicate)). + WatchesGVK( + gvk.EnvoyFilter, + reconciler.Dynamic(), + reconciler.WithEventMapper(ownedViaFTMapFunc), + reconciler.WithPredicates(reconciler.DefaultPredicate)). + WatchesGVK( + gvk.AuthorizationPolicy, + reconciler.Dynamic(), + reconciler.WithEventMapper(ownedViaFTMapFunc), + reconciler.WithPredicates(reconciler.DefaultPredicate)). + WatchesGVK( + gvk.Gateway, + reconciler.Dynamic(), + reconciler.WithEventMapper(ownedViaFTMapFunc), + reconciler.WithPredicates(reconciler.DefaultPredicate)). + + // actions + WithAction(checkPreConditions). + WithAction(initialize). + WithAction(devFlags). + WithAction(configureServerless). + WithAction(configureServiceMesh). + WithAction(kustomize.NewAction( + kustomize.WithCache(), + // These are the default labels added by the legacy deploy method + // and should be preserved as the original plugin were affecting + // deployment selectors that are immutable once created, so it won't + // be possible to actually amend the labels in a non-disruptive + // manner. + // + // Additional labels/annotations MUST be added by the deploy action + // so they would affect only objects metadata without side effects + kustomize.WithLabel(labels.ODH.Component(componentName), "true"), + kustomize.WithLabel(labels.K8SCommon.PartOf, componentName), + )). 
+ WithAction(customizeKserveConfigMap). + WithAction(deploy.NewAction( + deploy.WithCache(), + )). + WithAction(security.NewUpdatePodSecurityRoleBindingAction(serviceAccounts)). + WithAction(updatestatus.NewAction()). + // must be the final action + WithAction(gc.NewAction()). + Build(ctx) -// SetupWithManager sets up the controller with the Manager. -func (r *KserveReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&componentsv1.Kserve{}). - Complete(r) + return err } diff --git a/controllers/components/kserve/kserve_controller_actions.go b/controllers/components/kserve/kserve_controller_actions.go new file mode 100644 index 00000000000..66d83dfa04c --- /dev/null +++ b/controllers/components/kserve/kserve_controller_actions.go @@ -0,0 +1,273 @@ +package kserve + +import ( + "context" + "errors" + "fmt" + "strings" + + operatorv1 "github.com/openshift/api/operator/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + componentsv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + odherrors "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/errors" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" +) + +func checkPreConditions(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + k, ok := rr.Instance.(*componentsv1.Kserve) + if !ok { + return fmt.Errorf("resource instance 
%v is not a componentsv1.Kserve)", rr.Instance) + } + + if k.Spec.Serving.ManagementState != operatorv1.Managed { + return nil + } + + if rr.DSCI.Spec.ServiceMesh == nil || rr.DSCI.Spec.ServiceMesh.ManagementState != operatorv1.Managed { + s := k.GetStatus() + s.Phase = status.PhaseNotReady + + meta.SetStatusCondition(&s.Conditions, metav1.Condition{ + Type: status.ConditionTypeReady, + Status: metav1.ConditionFalse, + Reason: status.ServiceMeshNotConfiguredReason, + Message: status.ServiceMeshNotConfiguredMessage, + ObservedGeneration: s.ObservedGeneration, + }) + + return odherrors.NewStopError(status.ServiceMeshNotConfiguredMessage) + } + + if found, err := cluster.OperatorExists(ctx, rr.Client, serviceMeshOperator); err != nil || !found { + s := k.GetStatus() + s.Phase = status.PhaseNotReady + + meta.SetStatusCondition(&s.Conditions, metav1.Condition{ + Type: status.ConditionTypeReady, + Status: metav1.ConditionFalse, + Reason: status.ServiceMeshOperatorNotInstalledReason, + Message: status.ServiceMeshOperatorNotInstalledMessage, + ObservedGeneration: s.ObservedGeneration, + }) + + if err != nil { + return odherrors.NewStopErrorW(err) + } + + return odherrors.NewStopError(status.ServiceMeshOperatorNotInstalledMessage) + } + + if found, err := cluster.OperatorExists(ctx, rr.Client, serverlessOperator); err != nil || !found { + s := k.GetStatus() + s.Phase = status.PhaseNotReady + + meta.SetStatusCondition(&s.Conditions, metav1.Condition{ + Type: status.ConditionTypeReady, + Status: metav1.ConditionFalse, + Reason: status.ServerlessOperatorNotInstalledReason, + Message: status.ServerlessOperatorNotInstalledMessage, + ObservedGeneration: s.ObservedGeneration, + }) + + if err != nil { + return odherrors.NewStopErrorW(err) + } + + return odherrors.NewStopError(status.ServerlessOperatorNotInstalledMessage) + } + + return nil +} + +func initialize(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + rr.Manifests = []odhtypes.ManifestInfo{ + 
kserveManifestInfo(kserveManifestSourcePath), + odhModelControllerManifestInfo(odhModelControllerManifestSourcePath), + } + + return nil +} + +func devFlags(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + k, ok := rr.Instance.(*componentsv1.Kserve) + if !ok { + return fmt.Errorf("resource instance %v is not a componentsv1.Kserve)", rr.Instance) + } + + df := k.GetDevFlags() + if df == nil { + return nil + } + if len(df.Manifests) == 0 { + return nil + } + + kSourcePath := kserveManifestSourcePath + omcSourcePath := odhModelControllerManifestSourcePath + + for _, subcomponent := range df.Manifests { + if strings.Contains(subcomponent.URI, odhModelControllerComponentName) { + if err := deploy.DownloadManifests(ctx, odhModelControllerComponentName, subcomponent); err != nil { + return err + } + + if subcomponent.SourcePath != "" { + omcSourcePath = subcomponent.SourcePath + } + } + + if strings.Contains(subcomponent.URI, componentName) { + if err := deploy.DownloadManifests(ctx, componentName, subcomponent); err != nil { + return err + } + + if subcomponent.SourcePath != "" { + kSourcePath = subcomponent.SourcePath + } + } + } + + rr.Manifests = []odhtypes.ManifestInfo{ + kserveManifestInfo(kSourcePath), + odhModelControllerManifestInfo(omcSourcePath), + } + + return nil +} + +func configureServerless(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + k, ok := rr.Instance.(*componentsv1.Kserve) + if !ok { + return fmt.Errorf("resource instance %v is not a componentsv1.Kserve)", rr.Instance) + } + + logger := logf.FromContext(ctx) + cli := rr.Client + + switch k.Spec.Serving.ManagementState { + case operatorv1.Unmanaged: // Bring your own CR + logger.Info("Serverless CR is not configured by the operator, we won't do anything") + + case operatorv1.Removed: // we remove serving CR + logger.Info("existing Serverless CR (owned by operator) will be removed") + if err := removeServerlessFeatures(ctx, rr.Client, k, &rr.DSCI.Spec); err != nil 
{ + return err + } + + case operatorv1.Managed: // standard workflow to create CR + if rr.DSCI.Spec.ServiceMesh == nil { + return errors.New("ServiceMesh needs to be configured and 'Managed' in DSCI CR, " + + "it is required by KServe serving") + } + + switch rr.DSCI.Spec.ServiceMesh.ManagementState { + case operatorv1.Unmanaged, operatorv1.Removed: + return fmt.Errorf("ServiceMesh is currently set to '%s'. It needs to be set to 'Managed' in DSCI CR, "+ + "as it is required by the KServe serving field", rr.DSCI.Spec.ServiceMesh.ManagementState) + } + + serverlessFeatures := feature.ComponentFeaturesHandler(rr.Instance, componentName, rr.DSCI.Spec.ApplicationsNamespace, configureServerlessFeatures(&rr.DSCI.Spec, k)) + + if err := serverlessFeatures.Apply(ctx, cli); err != nil { + return err + } + } + return nil +} + +func configureServiceMesh(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + k, ok := rr.Instance.(*componentsv1.Kserve) + if !ok { + return fmt.Errorf("resource instance %v is not a componentsv1.Kserve)", rr.Instance) + } + + cli := rr.Client + + if rr.DSCI.Spec.ServiceMesh != nil { + if rr.DSCI.Spec.ServiceMesh.ManagementState == operatorv1.Managed { + serviceMeshInitializer := feature.ComponentFeaturesHandler(k, componentName, rr.DSCI.Spec.ApplicationsNamespace, defineServiceMeshFeatures(ctx, cli, &rr.DSCI.Spec)) + return serviceMeshInitializer.Apply(ctx, cli) + } + if rr.DSCI.Spec.ServiceMesh.ManagementState == operatorv1.Unmanaged { + return nil + } + } + + return removeServiceMeshConfigurations(ctx, cli, k, &rr.DSCI.Spec) +} + +func customizeKserveConfigMap(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + k, ok := rr.Instance.(*componentsv1.Kserve) + if !ok { + return fmt.Errorf("resource instance %v is not a componentsv1.Kserve)", rr.Instance) + } + + logger := logf.FromContext(ctx) + + kserveConfigMap := corev1.ConfigMap{} + cmidx, err := getIndexedResource(rr.Resources, &kserveConfigMap, gvk.ConfigMap, 
kserveConfigMapName) + if err != nil { + return err + } + + switch k.Spec.Serving.ManagementState { + case operatorv1.Managed, operatorv1.Unmanaged: + if k.Spec.DefaultDeploymentMode == "" { + // if the default mode is empty in the DSC, assume mode is "Serverless" since k.Serving is Managed + if err := setDefaultDeploymentMode(&kserveConfigMap, componentsv1.Serverless); err != nil { + return err + } + } else { + // if the default mode is explicitly specified, respect that + if err := setDefaultDeploymentMode(&kserveConfigMap, k.Spec.DefaultDeploymentMode); err != nil { + return err + } + } + case operatorv1.Removed: + if k.Spec.DefaultDeploymentMode == componentsv1.Serverless { + return errors.New("setting defaultdeployment mode as Serverless is incompatible with having Serving 'Removed'") + } + if k.Spec.DefaultDeploymentMode == "" { + logger.Info("Serving is removed, Kserve will default to RawDeployment") + } + if err := setDefaultDeploymentMode(&kserveConfigMap, componentsv1.RawDeployment); err != nil { + return err + } + } + + err = replaceResourceAtIndex(rr.Resources, cmidx, &kserveConfigMap) + if err != nil { + return err + } + + kserveConfigHash, err := hashConfigMap(&kserveConfigMap) + if err != nil { + return err + } + + kserveDeployment := appsv1.Deployment{} + deployidx, err := getIndexedResource(rr.Resources, &kserveDeployment, gvk.Deployment, "kserve-controller-manager") + if err != nil { + return err + } + + kserveDeployment.Spec.Template.Annotations[labels.ODHAppPrefix+"/KserveConfigHash"] = kserveConfigHash + + err = replaceResourceAtIndex(rr.Resources, deployidx, &kserveDeployment) + if err != nil { + return err + } + + return nil +} diff --git a/controllers/components/kserve/kserve_support.go b/controllers/components/kserve/kserve_support.go new file mode 100644 index 00000000000..cb09731a558 --- /dev/null +++ b/controllers/components/kserve/kserve_support.go @@ -0,0 +1,268 @@ +package kserve + +import ( + "context" + "encoding/base64" + 
"encoding/json" + "fmt" + "path" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + componentsv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + featuresv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/manifest" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/serverless" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +var serviceAccounts = map[cluster.Platform][]string{ + cluster.Unknown: {odhModelControllerComponentName}, + cluster.OpenDataHub: {odhModelControllerComponentName}, + cluster.ManagedRhods: {odhModelControllerComponentName}, + cluster.SelfManagedRhods: {odhModelControllerComponentName}, +} + +func kserveManifestInfo(sourcePath string) odhtypes.ManifestInfo { + return odhtypes.ManifestInfo{ + Path: deploy.DefaultManifestPath, + ContextDir: componentName, + SourcePath: sourcePath, + } +} + +func odhModelControllerManifestInfo(sourcePath string) odhtypes.ManifestInfo { + return odhtypes.ManifestInfo{ + Path: deploy.DefaultManifestPath, + ContextDir: odhModelControllerComponentName, + 
SourcePath: sourcePath, + } +} + +func configureServerlessFeatures(dsciSpec *dsciv1.DSCInitializationSpec, kserve *componentsv1.Kserve) feature.FeaturesProvider { + return func(registry feature.FeaturesRegistry) error { + servingDeployment := feature.Define("serverless-serving-deployment"). + Manifests( + manifest.Location(Resources.Location). + Include( + path.Join(Resources.InstallDir), + ), + ). + WithData( + serverless.FeatureData.IngressDomain.Define(&kserve.Spec.Serving).AsAction(), + serverless.FeatureData.Serving.Define(&kserve.Spec.Serving).AsAction(), + servicemesh.FeatureData.ControlPlane.Define(dsciSpec).AsAction(), + ). + PreConditions( + serverless.EnsureServerlessOperatorInstalled, + serverless.EnsureServerlessAbsent, + servicemesh.EnsureServiceMeshInstalled, + feature.CreateNamespaceIfNotExists(serverless.KnativeServingNamespace), + ). + PostConditions( + feature.WaitForPodsToBeReady(serverless.KnativeServingNamespace), + ) + + istioSecretFiltering := feature.Define("serverless-net-istio-secret-filtering"). + Manifests( + manifest.Location(Resources.Location). + Include( + path.Join(Resources.BaseDir, "serving-net-istio-secret-filtering.patch.tmpl.yaml"), + ), + ). + WithData(serverless.FeatureData.Serving.Define(&kserve.Spec.Serving).AsAction()). + PreConditions(serverless.EnsureServerlessServingDeployed). + PostConditions( + feature.WaitForPodsToBeReady(serverless.KnativeServingNamespace), + ) + + servingGateway := feature.Define("serverless-serving-gateways"). + Manifests( + manifest.Location(Resources.Location). + Include( + path.Join(Resources.GatewaysDir), + ), + ). + WithData( + serverless.FeatureData.IngressDomain.Define(&kserve.Spec.Serving).AsAction(), + serverless.FeatureData.CertificateName.Define(&kserve.Spec.Serving).AsAction(), + serverless.FeatureData.Serving.Define(&kserve.Spec.Serving).AsAction(), + servicemesh.FeatureData.ControlPlane.Define(dsciSpec).AsAction(), + ). + WithResources(serverless.ServingCertificateResource). 
+ PreConditions(serverless.EnsureServerlessServingDeployed) + + return registry.Add( + servingDeployment, + istioSecretFiltering, + servingGateway, + ) + } +} + +func defineServiceMeshFeatures(ctx context.Context, cli client.Client, dscispec *dsciv1.DSCInitializationSpec) feature.FeaturesProvider { + return func(registry feature.FeaturesRegistry) error { + authorinoInstalled, err := cluster.SubscriptionExists(ctx, cli, "authorino-operator") + if err != nil { + return fmt.Errorf("failed to list subscriptions %w", err) + } + + if authorinoInstalled { + kserveExtAuthzErr := registry.Add(feature.Define("kserve-external-authz"). + Manifests( + manifest.Location(Resources.Location). + Include( + path.Join(Resources.ServiceMeshDir, "activator-envoyfilter.tmpl.yaml"), + path.Join(Resources.ServiceMeshDir, "envoy-oauth-temp-fix.tmpl.yaml"), + path.Join(Resources.ServiceMeshDir, "kserve-predictor-authorizationpolicy.tmpl.yaml"), + ), + ). + Managed(). + WithData( + feature.Entry("Domain", cluster.GetDomain), + servicemesh.FeatureData.ControlPlane.Define(dscispec).AsAction(), + ). 
+ WithData( + servicemesh.FeatureData.Authorization.All(dscispec)..., + ), + ) + + if kserveExtAuthzErr != nil { + return kserveExtAuthzErr + } + } else { + ctrl.Log.Info("WARN: Authorino operator is not installed on the cluster, skipping authorization capability") + } + + return nil + } +} + +func removeServiceMeshConfigurations(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec) error { + serviceMeshInitializer := feature.ComponentFeaturesHandler(owner, componentName, dscispec.ApplicationsNamespace, defineServiceMeshFeatures(ctx, cli, dscispec)) + return serviceMeshInitializer.Delete(ctx, cli) +} + +func removeServerlessFeatures(ctx context.Context, cli client.Client, k *componentsv1.Kserve, dscispec *dsciv1.DSCInitializationSpec) error { + serverlessFeatures := feature.ComponentFeaturesHandler(k, componentName, dscispec.ApplicationsNamespace, configureServerlessFeatures(dscispec, k)) + return serverlessFeatures.Delete(ctx, cli) +} + +func setDefaultDeploymentMode(inferenceServiceConfigMap *corev1.ConfigMap, defaultmode componentsv1.DefaultDeploymentMode) error { + // set data.deploy.defaultDeploymentMode to the model specified in the Kserve spec + var deployData map[string]interface{} + if err := json.Unmarshal([]byte(inferenceServiceConfigMap.Data["deploy"]), &deployData); err != nil { + return fmt.Errorf("error retrieving value for key 'deploy' from configmap %s. %w", kserveConfigMapName, err) + } + modeFound := deployData["defaultDeploymentMode"] + if modeFound != string(defaultmode) { + deployData["defaultDeploymentMode"] = defaultmode + deployDataBytes, err := json.MarshalIndent(deployData, "", " ") + if err != nil { + return fmt.Errorf("could not set values in configmap %s. 
%w", kserveConfigMapName, err) + } + inferenceServiceConfigMap.Data["deploy"] = string(deployDataBytes) + + var ingressData map[string]interface{} + if err = json.Unmarshal([]byte(inferenceServiceConfigMap.Data["ingress"]), &ingressData); err != nil { + return fmt.Errorf("error retrieving value for key 'ingress' from configmap %s. %w", kserveConfigMapName, err) + } + if defaultmode == componentsv1.RawDeployment { + ingressData["disableIngressCreation"] = true + } else { + ingressData["disableIngressCreation"] = false + } + ingressDataBytes, err := json.MarshalIndent(ingressData, "", " ") + if err != nil { + return fmt.Errorf("could not set values in configmap %s. %w", kserveConfigMapName, err) + } + inferenceServiceConfigMap.Data["ingress"] = string(ingressDataBytes) + } + + return nil +} + +func getIndexedResource(rs []unstructured.Unstructured, obj any, g schema.GroupVersionKind, name string) (int, error) { + var idx = -1 + for i, r := range rs { + if r.GroupVersionKind() == g && r.GetName() == name { + idx = i + break + } + } + + if idx == -1 { + return -1, fmt.Errorf("could not find %T with name %v in resources list", obj, name) + } + + err := runtime.DefaultUnstructuredConverter.FromUnstructured(rs[idx].Object, obj) + if err != nil { + return idx, fmt.Errorf("failed converting to %T from Unstructured.Object: %v", obj, rs[idx].Object) + } + + return idx, nil +} + +func replaceResourceAtIndex(rs []unstructured.Unstructured, idx int, obj any) error { + u, err := resources.ToUnstructured(obj) + if err != nil { + return err + } + + rs[idx] = *u + return nil +} + +func hashConfigMap(cm *corev1.ConfigMap) (string, error) { + u, err := resources.ToUnstructured(cm) + if err != nil { + return "", err + } + + h, err := resources.Hash(u) + if err != nil { + return "", err + } + + return base64.RawURLEncoding.EncodeToString(h), nil +} + +func ownedViaFT(cli client.Client) handler.MapFunc { + return func(ctx context.Context, a client.Object) []reconcile.Request { + for _, 
or := range a.GetOwnerReferences() { + if or.Kind == "FeatureTracker" { + ft := featuresv1.FeatureTracker{} + if err := cli.Get(ctx, client.ObjectKey{Name: or.Name}, &ft); err != nil { + return []reconcile.Request{} + } + + for _, ftor := range ft.GetOwnerReferences() { + if ftor.Kind == componentsv1.KserveKind && ftor.Name != "" { + return []reconcile.Request{{ + NamespacedName: types.NamespacedName{ + Name: ftor.Name, + }, + }} + } + } + } + } + + return []reconcile.Request{} + } +} diff --git a/components/kserve/resources/servicemesh/activator-envoyfilter.tmpl.yaml b/controllers/components/kserve/resources/servicemesh/activator-envoyfilter.tmpl.yaml similarity index 100% rename from components/kserve/resources/servicemesh/activator-envoyfilter.tmpl.yaml rename to controllers/components/kserve/resources/servicemesh/activator-envoyfilter.tmpl.yaml diff --git a/components/kserve/resources/servicemesh/envoy-oauth-temp-fix.tmpl.yaml b/controllers/components/kserve/resources/servicemesh/envoy-oauth-temp-fix.tmpl.yaml similarity index 100% rename from components/kserve/resources/servicemesh/envoy-oauth-temp-fix.tmpl.yaml rename to controllers/components/kserve/resources/servicemesh/envoy-oauth-temp-fix.tmpl.yaml diff --git a/components/kserve/resources/servicemesh/kserve-predictor-authorizationpolicy.tmpl.yaml b/controllers/components/kserve/resources/servicemesh/kserve-predictor-authorizationpolicy.tmpl.yaml similarity index 91% rename from components/kserve/resources/servicemesh/kserve-predictor-authorizationpolicy.tmpl.yaml rename to controllers/components/kserve/resources/servicemesh/kserve-predictor-authorizationpolicy.tmpl.yaml index a79057f26a9..49002669468 100644 --- a/components/kserve/resources/servicemesh/kserve-predictor-authorizationpolicy.tmpl.yaml +++ b/controllers/components/kserve/resources/servicemesh/kserve-predictor-authorizationpolicy.tmpl.yaml @@ -9,7 +9,7 @@ metadata: spec: action: CUSTOM provider: - name: opendatahub-odh-auth-provider + name: {{ 
.AuthExtensionName }} rules: - to: - operation: diff --git a/components/kserve/resources/servicemesh/routing/istio-ingress-gateway.tmpl.yaml b/controllers/components/kserve/resources/servicemesh/routing/istio-ingress-gateway.tmpl.yaml similarity index 100% rename from components/kserve/resources/servicemesh/routing/istio-ingress-gateway.tmpl.yaml rename to controllers/components/kserve/resources/servicemesh/routing/istio-ingress-gateway.tmpl.yaml diff --git a/components/kserve/resources/servicemesh/routing/istio-kserve-local-gateway.tmpl.yaml b/controllers/components/kserve/resources/servicemesh/routing/istio-kserve-local-gateway.tmpl.yaml similarity index 100% rename from components/kserve/resources/servicemesh/routing/istio-kserve-local-gateway.tmpl.yaml rename to controllers/components/kserve/resources/servicemesh/routing/istio-kserve-local-gateway.tmpl.yaml diff --git a/components/kserve/resources/servicemesh/routing/istio-local-gateway.yaml b/controllers/components/kserve/resources/servicemesh/routing/istio-local-gateway.yaml similarity index 100% rename from components/kserve/resources/servicemesh/routing/istio-local-gateway.yaml rename to controllers/components/kserve/resources/servicemesh/routing/istio-local-gateway.yaml diff --git a/components/kserve/resources/servicemesh/routing/kserve-local-gateway-svc.tmpl.yaml b/controllers/components/kserve/resources/servicemesh/routing/kserve-local-gateway-svc.tmpl.yaml similarity index 100% rename from components/kserve/resources/servicemesh/routing/kserve-local-gateway-svc.tmpl.yaml rename to controllers/components/kserve/resources/servicemesh/routing/kserve-local-gateway-svc.tmpl.yaml diff --git a/components/kserve/resources/servicemesh/routing/local-gateway-svc.tmpl.yaml b/controllers/components/kserve/resources/servicemesh/routing/local-gateway-svc.tmpl.yaml similarity index 100% rename from components/kserve/resources/servicemesh/routing/local-gateway-svc.tmpl.yaml rename to 
controllers/components/kserve/resources/servicemesh/routing/local-gateway-svc.tmpl.yaml diff --git a/components/kserve/resources/serving-install/knative-serving.tmpl.yaml b/controllers/components/kserve/resources/serving-install/knative-serving.tmpl.yaml similarity index 100% rename from components/kserve/resources/serving-install/knative-serving.tmpl.yaml rename to controllers/components/kserve/resources/serving-install/knative-serving.tmpl.yaml diff --git a/components/kserve/resources/serving-install/service-mesh-subscription.tmpl.yaml b/controllers/components/kserve/resources/serving-install/service-mesh-subscription.tmpl.yaml similarity index 100% rename from components/kserve/resources/serving-install/service-mesh-subscription.tmpl.yaml rename to controllers/components/kserve/resources/serving-install/service-mesh-subscription.tmpl.yaml diff --git a/components/kserve/resources/serving-net-istio-secret-filtering.patch.tmpl.yaml b/controllers/components/kserve/resources/serving-net-istio-secret-filtering.patch.tmpl.yaml similarity index 100% rename from components/kserve/resources/serving-net-istio-secret-filtering.patch.tmpl.yaml rename to controllers/components/kserve/resources/serving-net-istio-secret-filtering.patch.tmpl.yaml diff --git a/controllers/components/modelregistry/modelregistry_controller_actions.go b/controllers/components/modelregistry/modelregistry_controller_actions.go index fdce8cfcca3..5be01ee830d 100644 --- a/controllers/components/modelregistry/modelregistry_controller_actions.go +++ b/controllers/components/modelregistry/modelregistry_controller_actions.go @@ -97,7 +97,7 @@ func configureDependencies(ctx context.Context, rr *odhtypes.ReconciliationReque // Namespace - if err := rr.AddResource( + if err := rr.AddResources( &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: mr.Spec.RegistriesNamespace, @@ -115,7 +115,7 @@ func configureDependencies(ctx context.Context, rr *odhtypes.ReconciliationReque return fmt.Errorf("failed to find 
default ingress secret for model registry: %w", err) } - if err := rr.AddResource( + if err := rr.AddResources( &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: DefaultModelRegistryCert, diff --git a/controllers/components/workbenches/workbenches_controller_actions.go b/controllers/components/workbenches/workbenches_controller_actions.go index 7712beb8dc0..9832d1ed5ff 100644 --- a/controllers/components/workbenches/workbenches_controller_actions.go +++ b/controllers/components/workbenches/workbenches_controller_actions.go @@ -96,7 +96,7 @@ func configureDependencies(ctx context.Context, rr *odhtypes.ReconciliationReque if platform == cluster.SelfManagedRhods || platform == cluster.ManagedRhods { // Intentionally leaving the ownership unset for this namespace. // Specifying this label triggers its deletion when the operator is uninstalled. - if err := rr.AddResource(&corev1.Namespace{ + if err := rr.AddResources(&corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: cluster.DefaultNotebooksNamespace, Labels: map[string]string{ diff --git a/controllers/datasciencecluster/datasciencecluster_controller.go b/controllers/datasciencecluster/datasciencecluster_controller.go index 9ebe86ef5ed..2254913b2e7 100644 --- a/controllers/datasciencecluster/datasciencecluster_controller.go +++ b/controllers/datasciencecluster/datasciencecluster_controller.go @@ -495,6 +495,7 @@ func (r *DataScienceClusterReconciler) SetupWithManager(ctx context.Context, mgr Owns(&componentsv1.CodeFlare{}). Owns(&componentsv1.TrainingOperator{}). Owns(&componentsv1.DataSciencePipelines{}). + Owns(&componentsv1.Kserve{}). 
Owns( &corev1.ServiceAccount{}, builder.WithPredicates(saPredicates), diff --git a/controllers/datasciencecluster/kubebuilder_rbac.go b/controllers/datasciencecluster/kubebuilder_rbac.go index 9ca599a13ca..a09ec4dae7d 100644 --- a/controllers/datasciencecluster/kubebuilder_rbac.go +++ b/controllers/datasciencecluster/kubebuilder_rbac.go @@ -162,7 +162,7 @@ package datasciencecluster //+kubebuilder:rbac:groups=components.opendatahub.io,resources=codeflares/status,verbs=get;update;patch //+kubebuilder:rbac:groups=components.opendatahub.io,resources=codeflares/finalizers,verbs=update -// TODO: Kserve +// Kserve // +kubebuilder:rbac:groups=components.opendatahub.io,resources=kserves,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=components.opendatahub.io,resources=kserves/status,verbs=get;update;patch // +kubebuilder:rbac:groups=components.opendatahub.io,resources=kserves/finalizers,verbs=update diff --git a/controllers/status/status.go b/controllers/status/status.go index f7b5d533752..9dc84a8aa58 100644 --- a/controllers/status/status.go +++ b/controllers/status/status.go @@ -92,6 +92,12 @@ const ( const ( ServiceMeshNotConfiguredReason = "ServiceMeshNotConfigured" ServiceMeshNotConfiguredMessage = "ServiceMesh needs to be set to 'Managed' in DSCI CR" + + ServiceMeshOperatorNotInstalledReason = "ServiceMeshOperatorNotInstalled" + ServiceMeshOperatorNotInstalledMessage = "ServiceMesh operator must be installed for this component's configuration" + + ServerlessOperatorNotInstalledReason = "ServerlessOperatorNotInstalled" + ServerlessOperatorNotInstalledMessage = "Serverless operator must be installed for this component's configuration" ) const ( diff --git a/controllers/webhook/webhook_suite_test.go b/controllers/webhook/webhook_suite_test.go index 8c1ca00a5a6..519de53774e 100644 --- a/controllers/webhook/webhook_suite_test.go +++ b/controllers/webhook/webhook_suite_test.go @@ -45,7 +45,6 @@ import ( dscv1 
"github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" componentsold "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/components/kserve" "github.com/opendatahub-io/opendatahub-operator/v2/components/modelmeshserving" modelregistry2 "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/modelregistry" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/webhook" @@ -277,8 +276,8 @@ func newDSC(name string, namespace string) *dscv1.DataScienceCluster { ManagementState: operatorv1.Removed, }, }, - Kserve: kserve.Kserve{ - Component: componentsold.Component{ + Kserve: componentsv1.DSCKserve{ + ManagementSpec: components.ManagementSpec{ ManagementState: operatorv1.Removed, }, }, diff --git a/docs/api-overview.md b/docs/api-overview.md index 8a433e461ca..fba56e59eba 100644 --- a/docs/api-overview.md +++ b/docs/api-overview.md @@ -180,6 +180,25 @@ _Appears in:_ | `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +#### DSCKserve + + + +DSCKserve contains all the configuration exposed in DSC instance for Kserve component + + + +_Appears in:_ +- [Components](#components) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +| `serving` _[ServingSpec](#servingspec)_ | Serving configures the KNative-Serving stack used for model serving. A Service
Mesh (Istio) is prerequisite, since it is used as networking layer. | | | +| `defaultDeploymentMode` _[DefaultDeploymentMode](#defaultdeploymentmode)_ | Configures the default deployment mode for Kserve. This can be set to 'Serverless' or 'RawDeployment'.
The value specified in this field will be used to set the default deployment mode in the 'inferenceservice-config' configmap for Kserve.
This field is optional. If no default deployment mode is specified, Kserve will use Serverless mode. | | Enum: [Serverless RawDeployment]
Pattern: `^(Serverless\|RawDeployment)$`
| + + #### DSCKueue @@ -487,6 +506,26 @@ _Appears in:_ | `observedGeneration` _integer_ | | | | +#### DefaultDeploymentMode + +_Underlying type:_ _string_ + + + +_Validation:_ +- Pattern: `^(Serverless|RawDeployment)$` + +_Appears in:_ +- [DSCKserve](#dsckserve) +- [KserveCommonSpec](#kservecommonspec) +- [KserveSpec](#kservespec) + +| Field | Description | +| --- | --- | +| `Serverless` | Serverless will be used as the default deployment mode for Kserve. This requires Serverless and ServiceMesh operators configured as dependencies.
| +| `RawDeployment` | RawDeployment will be used as the default deployment mode for Kserve.
| + + #### Kserve @@ -509,6 +548,25 @@ _Appears in:_ | `status` _[KserveStatus](#kservestatus)_ | | | | +#### KserveCommonSpec + + + +KserveCommonSpec spec defines the shared desired state of Kserve + + + +_Appears in:_ +- [DSCKserve](#dsckserve) +- [KserveSpec](#kservespec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +| `serving` _[ServingSpec](#servingspec)_ | Serving configures the KNative-Serving stack used for model serving. A Service
Mesh (Istio) is prerequisite, since it is used as networking layer. | | | +| `defaultDeploymentMode` _[DefaultDeploymentMode](#defaultdeploymentmode)_ | Configures the default deployment mode for Kserve. This can be set to 'Serverless' or 'RawDeployment'.
The value specified in this field will be used to set the default deployment mode in the 'inferenceservice-config' configmap for Kserve.
This field is optional. If no default deployment mode is specified, Kserve will use Serverless mode. | | Enum: [Serverless RawDeployment]
Pattern: `^(Serverless\|RawDeployment)$`
| + + #### KserveList @@ -542,7 +600,9 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `foo` _string_ | Foo is an example field of Kserve. Edit kserve_types.go to remove/update | | | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +| `serving` _[ServingSpec](#servingspec)_ | Serving configures the KNative-Serving stack used for model serving. A Service
Mesh (Istio) is prerequisite, since it is used as networking layer. | | | +| `defaultDeploymentMode` _[DefaultDeploymentMode](#defaultdeploymentmode)_ | Configures the default deployment mode for Kserve. This can be set to 'Serverless' or 'RawDeployment'.
The value specified in this field will be used to set the default deployment mode in the 'inferenceservice-config' configmap for Kserve.
This field is optional. If no default deployment mode is specified, Kserve will use Serverless mode. | | Enum: [Serverless RawDeployment]
Pattern: `^(Serverless\|RawDeployment)$`
| #### KserveStatus @@ -1215,7 +1275,6 @@ Component struct defines the basis for each OpenDataHub component configuration. _Appears in:_ -- [Kserve](#kserve) - [ModelMeshServing](#modelmeshserving) | Field | Description | Default | Validation | @@ -1260,6 +1319,7 @@ _Appears in:_ - [DSCCodeFlare](#dsccodeflare) - [DSCDashboard](#dscdashboard) - [DSCDataSciencePipelines](#dscdatasciencepipelines) +- [DSCKserve](#dsckserve) - [DSCKueue](#dsckueue) - [DSCModelRegistry](#dscmodelregistry) - [DSCRay](#dscray) @@ -1270,6 +1330,8 @@ _Appears in:_ - [DashboardSpec](#dashboardspec) - [DataSciencePipelinesCommonSpec](#datasciencepipelinescommonspec) - [DataSciencePipelinesSpec](#datasciencepipelinesspec) +- [KserveCommonSpec](#kservecommonspec) +- [KserveSpec](#kservespec) - [KueueCommonSpec](#kueuecommonspec) - [KueueSpec](#kueuespec) - [ModelRegistryCommonSpec](#modelregistrycommonspec) @@ -1301,6 +1363,7 @@ _Appears in:_ - [DSCCodeFlare](#dsccodeflare) - [DSCDashboard](#dscdashboard) - [DSCDataSciencePipelines](#dscdatasciencepipelines) +- [DSCKserve](#dsckserve) - [DSCKueue](#dsckueue) - [DSCModelRegistry](#dscmodelregistry) - [DSCRay](#dscray) @@ -1348,45 +1411,6 @@ _Appears in:_ -## datasciencecluster.opendatahub.io/kserve - -Package kserve provides utility functions to config Kserve as the Controller for serving ML models on arbitrary frameworks - - - -#### DefaultDeploymentMode - -_Underlying type:_ _string_ - - - -_Validation:_ -- Pattern: `^(Serverless|RawDeployment)$` - -_Appears in:_ -- [Kserve](#kserve) - - - -#### Kserve - - - -Kserve struct holds the configuration for the Kserve component. - - - -_Appears in:_ -- [Components](#components) - -| Field | Description | Default | Validation | -| --- | --- | --- | --- | -| `Component` _[Component](#component)_ | | | | -| `serving` _[ServingSpec](#servingspec)_ | Serving configures the KNative-Serving stack used for model serving. A Service
Mesh (Istio) is prerequisite, since it is used as networking layer. | | | -| `defaultDeploymentMode` _[DefaultDeploymentMode](#defaultdeploymentmode)_ | Configures the default deployment mode for Kserve. This can be set to 'Serverless' or 'RawDeployment'.
The value specified in this field will be used to set the default deployment mode in the 'inferenceservice-config' configmap for Kserve.
This field is optional. If no default deployment mode is specified, Kserve will use Serverless mode. | | Enum: [Serverless RawDeployment]
Pattern: `^(Serverless\|RawDeployment)$`
| - - - ## datasciencecluster.opendatahub.io/modelmeshserving Package modelmeshserving provides utility functions to config MoModelMesh, a general-purpose model serving management/routing layer @@ -1488,7 +1512,7 @@ _Appears in:_ | `workbenches` _[DSCWorkbenches](#dscworkbenches)_ | Workbenches component configuration. | | | | `modelmeshserving` _[ModelMeshServing](#modelmeshserving)_ | ModelMeshServing component configuration.
Does not support enabled Kserve at the same time | | | | `datasciencepipelines` _[DSCDataSciencePipelines](#dscdatasciencepipelines)_ | DataServicePipeline component configuration.
Require OpenShift Pipelines Operator to be installed before enable component | | | -| `kserve` _[Kserve](#kserve)_ | Kserve component configuration.
Require OpenShift Serverless and OpenShift Service Mesh Operators to be installed before enable component
Does not support enabled ModelMeshServing at the same time | | | +| `kserve` _[DSCKserve](#dsckserve)_ | Kserve component configuration.
Require OpenShift Serverless and OpenShift Service Mesh Operators to be installed before enable component
Does not support enabled ModelMeshServing at the same time | | | | `kueue` _[DSCKueue](#dsckueue)_ | Kueue component configuration. | | | | `codeflare` _[DSCCodeFlare](#dsccodeflare)_ | CodeFlare component configuration.
If CodeFlare Operator has been installed in the cluster, it should be uninstalled first before enabled component. | | | | `ray` _[DSCRay](#dscray)_ | Ray component configuration. | | | @@ -1635,7 +1659,9 @@ bindings with the Service Mesh. _Appears in:_ -- [Kserve](#kserve) +- [DSCKserve](#dsckserve) +- [KserveCommonSpec](#kservecommonspec) +- [KserveSpec](#kservespec) | Field | Description | Default | Validation | | --- | --- | --- | --- | diff --git a/go.mod b/go.mod index e1cce61009e..d541a927641 100644 --- a/go.mod +++ b/go.mod @@ -27,6 +27,7 @@ require ( golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 gopkg.in/yaml.v2 v2.4.0 + gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.29.2 k8s.io/apiextensions-apiserver v0.29.2 k8s.io/apimachinery v0.29.2 @@ -109,7 +110,6 @@ require ( google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/component-base v0.29.2 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect diff --git a/main.go b/main.go index b031e2fe9ad..5b7d4189a5b 100644 --- a/main.go +++ b/main.go @@ -30,6 +30,7 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" routev1 "github.com/openshift/api/route/v1" securityv1 "github.com/openshift/api/security/v1" + templatev1 "github.com/openshift/api/template/v1" userv1 "github.com/openshift/api/user/v1" ofapiv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" ofapiv2 "github.com/operator-framework/api/pkg/operators/v2" @@ -80,6 +81,7 @@ import ( _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/codeflare" _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/dashboard" _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/datasciencepipelines" + _ 
"github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/kserve" _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/kueue" _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/ray" _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/trainingoperator" @@ -121,6 +123,7 @@ func init() { //nolint:gochecknoinits utilruntime.Must(operatorv1.Install(scheme)) utilruntime.Must(consolev1.AddToScheme(scheme)) utilruntime.Must(securityv1.Install(scheme)) + utilruntime.Must(templatev1.Install(scheme)) } func initComponents(_ context.Context, p cluster.Platform) error { diff --git a/pkg/cluster/gvk/gvk.go b/pkg/cluster/gvk/gvk.go index 3222d5b20f3..eda392d435f 100644 --- a/pkg/cluster/gvk/gvk.go +++ b/pkg/cluster/gvk/gvk.go @@ -181,4 +181,22 @@ var ( Version: coordinationv1.SchemeGroupVersion.Version, Kind: "Lease", } + + EnvoyFilter = schema.GroupVersionKind{ + Group: "networking.istio.io", + Version: "v1alpha3", + Kind: "EnvoyFilter", + } + + AuthorizationPolicy = schema.GroupVersionKind{ + Group: "security.istio.io", + Version: "v1", + Kind: "AuthorizationPolicy", + } + + Gateway = schema.GroupVersionKind{ + Group: "networking.istio.io", + Version: "v1beta1", + Kind: "Gateway", + } ) diff --git a/pkg/controller/actions/deploy/action_deploy.go b/pkg/controller/actions/deploy/action_deploy.go index 2728098e8ff..93f77772a5c 100644 --- a/pkg/controller/actions/deploy/action_deploy.go +++ b/pkg/controller/actions/deploy/action_deploy.go @@ -327,7 +327,6 @@ func (a *Action) patch( if err := RemoveDeploymentsResources(obj); err != nil { return nil, fmt.Errorf("failed to apply allow list to Deployment %s/%s: %w", obj.GetNamespace(), obj.GetName(), err) } - default: // do nothing break @@ -392,6 +391,18 @@ func (a *Action) apply( if err := MergeDeployments(old, obj); err != nil { return nil, fmt.Errorf("failed to merge Deployment %s/%s: %w", obj.GetNamespace(), obj.GetName(), err) } + case 
gvk.ClusterRole: + // For ClusterRole, if AggregationRule is set, then the Rules are controller managed + // and direct changes to Rules will be stomped by the controller. This also happen if + // the rules are set to an empty slice or nil hence we are removing the rules field + // if the ClusterRole is set to be an aggregation role. + _, found, err := unstructured.NestedFieldNoCopy(obj.Object, "aggregationRule") + if err != nil { + return nil, err + } + if found { + unstructured.RemoveNestedField(obj.Object, "rules") + } default: // do nothing break diff --git a/pkg/controller/actions/deploy/action_deploy_test.go b/pkg/controller/actions/deploy/action_deploy_test.go index e441da7559c..b5128dd3973 100644 --- a/pkg/controller/actions/deploy/action_deploy_test.go +++ b/pkg/controller/actions/deploy/action_deploy_test.go @@ -2,29 +2,40 @@ package deploy_test import ( "context" + "path/filepath" "strconv" "strings" "testing" "github.com/blang/semver/v4" + "github.com/onsi/gomega/gstruct" "github.com/operator-framework/api/pkg/lib/version" "github.com/rs/xid" appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + apimachinery "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" componentsv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1" dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + odhCli 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/fakeclient" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + "github.com/opendatahub-io/opendatahub-operator/v2/tests/envtestutil" . "github.com/onsi/gomega" ) @@ -239,3 +250,127 @@ func TestDeployNotOwnedCreate(t *testing.T) { jq.Match(`.spec.strategy.type == "%s"`, appsv1.RollingUpdateDeploymentStrategyType), )) } + +func TestDeployClusterRole(t *testing.T) { + g := NewWithT(t) + s := runtime.NewScheme() + + utilruntime.Must(corev1.AddToScheme(s)) + utilruntime.Must(appsv1.AddToScheme(s)) + utilruntime.Must(apiextensionsv1.AddToScheme(s)) + utilruntime.Must(componentsv1.AddToScheme(s)) + utilruntime.Must(rbacv1.AddToScheme(s)) + + projectDir, err := envtestutil.FindProjectRoot() + g.Expect(err).NotTo(HaveOccurred()) + + envTest := &envtest.Environment{ + CRDInstallOptions: envtest.CRDInstallOptions{ + Scheme: s, + Paths: []string{ + filepath.Join(projectDir, "config", "crd", "bases"), + }, + ErrorIfPathMissing: true, + CleanUpAfterUse: false, + }, + } + + t.Cleanup(func() { + _ = envTest.Stop() + }) + + cfg, err := envTest.Start() + g.Expect(err).NotTo(HaveOccurred()) + + envTestClient, err := client.New(cfg, client.Options{Scheme: s}) + g.Expect(err).NotTo(HaveOccurred()) + + cli, err := odhCli.NewFromConfig(cfg, envTestClient) + g.Expect(err).NotTo(HaveOccurred()) + + t.Run("aggregation", func(t *testing.T) { + ctx := context.Background() + name := xid.New().String() + + deployClusterRoles(t, ctx, cli, rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Rules: []rbacv1.PolicyRule{{ + 
Verbs: []string{"*"}, + Resources: []string{"*"}, + APIGroups: []string{"*"}, + }}, + AggregationRule: &rbacv1.AggregationRule{ + ClusterRoleSelectors: []metav1.LabelSelector{{ + MatchLabels: map[string]string{"foo": "bar"}, + }}, + }, + }) + + out := rbacv1.ClusterRole{} + err = cli.Get(ctx, client.ObjectKey{Name: name}, &out) + g.Expect(err).NotTo(HaveOccurred()) + + g.Expect(out).To(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Rules": BeEmpty(), + })) + }) + + t.Run("no aggregation", func(t *testing.T) { + ctx := context.Background() + name := xid.New().String() + + deployClusterRoles(t, ctx, cli, rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Rules: []rbacv1.PolicyRule{{ + Verbs: []string{"*"}, + Resources: []string{"*"}, + APIGroups: []string{"*"}, + }}, + }) + + out := rbacv1.ClusterRole{} + err = cli.Get(ctx, client.ObjectKey{Name: name}, &out) + g.Expect(err).NotTo(HaveOccurred()) + + g.Expect(out).To(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Rules": HaveLen(1), + })) + }) +} + +func deployClusterRoles(t *testing.T, ctx context.Context, cli *odhCli.Client, roles ...rbacv1.ClusterRole) { + t.Helper() + + g := NewWithT(t) + + rr := types.ReconciliationRequest{ + Client: cli, + DSCI: &dsciv1.DSCInitialization{Spec: dsciv1.DSCInitializationSpec{ + ApplicationsNamespace: xid.New().String(), + }}, + DSC: &dscv1.DataScienceCluster{}, + Instance: &componentsv1.Dashboard{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + UID: apimachinery.UID(xid.New().String()), + }, + }, + Release: cluster.Release{ + Name: cluster.OpenDataHub, + Version: version.OperatorVersion{Version: semver.Version{ + Major: 1, Minor: 2, Patch: 3, + }}}, + } + + for i := range roles { + err := rr.AddResources(roles[i].DeepCopy()) + g.Expect(err).ShouldNot(HaveOccurred()) + } + + err := deploy.NewAction()(ctx, &rr) + g.Expect(err).ShouldNot(HaveOccurred()) +} diff --git a/pkg/controller/client/client.go 
b/pkg/controller/client/client.go index b38d2028411..c852246d0cb 100644 --- a/pkg/controller/client/client.go +++ b/pkg/controller/client/client.go @@ -72,7 +72,7 @@ func (c *Client) Apply(ctx context.Context, in ctrlCli.Object, opts ...ctrlCli.P err = c.Client.Patch(ctx, u, ctrlCli.Apply, opts...) if err != nil { - return fmt.Errorf("unable to pactch object %s: %w", u, err) + return fmt.Errorf("unable to patch object %s: %w", u, err) } // Write back the modified object so callers can access the patched object. diff --git a/pkg/controller/predicates/clusterrole/clusterrole.go b/pkg/controller/predicates/clusterrole/clusterrole.go new file mode 100644 index 00000000000..b4c1d818819 --- /dev/null +++ b/pkg/controller/predicates/clusterrole/clusterrole.go @@ -0,0 +1,41 @@ +package clusterrole + +import ( + "reflect" + + rbacv1 "k8s.io/api/rbac/v1" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// IgnoreIfAggregationRule is a watch predicate that can be used with +// ClusterRoles to ignore the rules field on update if aggregationRule is set. 
+func IgnoreIfAggregationRule() predicate.Predicate { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + oldClusterRole, ok := e.ObjectOld.DeepCopyObject().(*rbacv1.ClusterRole) + if !ok { + return true + } + newClusterRole, ok := e.ObjectNew.DeepCopyObject().(*rbacv1.ClusterRole) + if !ok { + return true + } + + // if aggregationRule is set, then the rules are set by k8s based on other + // ClusterRoles matching a label selector, so we shouldn't try to reset that + // back to empty + if newClusterRole.AggregationRule != nil { + oldClusterRole.Rules = nil + newClusterRole.Rules = nil + } + + oldClusterRole.SetManagedFields(nil) + newClusterRole.SetManagedFields(nil) + oldClusterRole.SetResourceVersion("") + newClusterRole.SetResourceVersion("") + + return !reflect.DeepEqual(oldClusterRole, newClusterRole) + }, + } +} diff --git a/pkg/controller/predicates/hash/hash.go b/pkg/controller/predicates/hash/hash.go new file mode 100644 index 00000000000..b2157793912 --- /dev/null +++ b/pkg/controller/predicates/hash/hash.go @@ -0,0 +1,38 @@ +package hash + +import ( + "bytes" + + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +// Updated is a watch predicate that can be used to ignore updates +// of resources if they're considered equal after hashing by resources.Hash(). 
+func Updated() predicate.Predicate { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + oldUnstructured, err := resources.ToUnstructured(e.ObjectOld.DeepCopyObject()) + if err != nil { + return true + } + newUnstructured, err := resources.ToUnstructured(e.ObjectNew.DeepCopyObject()) + if err != nil { + return true + } + + oldHash, err := resources.Hash(oldUnstructured) + if err != nil { + return true + } + newHash, err := resources.Hash(newUnstructured) + if err != nil { + return true + } + + return !bytes.Equal(oldHash, newHash) + }, + } +} diff --git a/pkg/controller/reconciler/component_reconciler_actions.go b/pkg/controller/reconciler/component_reconciler_actions.go index c85b79fc35f..5d32508734f 100644 --- a/pkg/controller/reconciler/component_reconciler_actions.go +++ b/pkg/controller/reconciler/component_reconciler_actions.go @@ -6,22 +6,23 @@ import ( "strings" "k8s.io/apimachinery/pkg/runtime/schema" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/source" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" ) +type dynamicWatchFn func(client.Object, handler.EventHandler, ...predicate.Predicate) error + type dynamicWatchAction struct { - mgr ctrl.Manager - controller controller.Controller - watches []watchInput - watched map[schema.GroupVersionKind]struct{} + fn dynamicWatchFn + watches []watchInput + watched map[schema.GroupVersionKind]struct{} } -func (a *dynamicWatchAction) run(_ context.Context, rr *types.ReconciliationRequest) error { +func (a *dynamicWatchAction) run(ctx context.Context, rr *types.ReconciliationRequest) error { controllerName := strings.ToLower(rr.Instance.GetObjectKind().GroupVersionKind().Kind) 
for i := range a.watches { @@ -33,28 +34,38 @@ func (a *dynamicWatchAction) run(_ context.Context, rr *types.ReconciliationRequ continue } - err := a.controller.Watch( - source.Kind(a.mgr.GetCache(), w.object), - w.eventHandler, - w.predicates..., - ) + ok := a.shouldWatch(ctx, w, rr) + if !ok { + continue + } + err := a.fn(w.object, w.eventHandler, w.predicates...) if err != nil { return fmt.Errorf("failed to create watcher for %s: %w", w.object.GetObjectKind().GroupVersionKind(), err) } - DynamicWatchResourcesTotal.WithLabelValues(controllerName).Inc() a.watched[gvk] = struct{}{} + DynamicWatchResourcesTotal.WithLabelValues(controllerName).Inc() } return nil } -func newDynamicWatchAction(mgr ctrl.Manager, controller controller.Controller, watches []watchInput) actions.Fn { +func (a *dynamicWatchAction) shouldWatch(ctx context.Context, in watchInput, rr *types.ReconciliationRequest) bool { + for pi := range in.dynamicPred { + ok := in.dynamicPred[pi](ctx, rr) + if !ok { + return false + } + } + + return true +} + +func newDynamicWatch(fn dynamicWatchFn, watches []watchInput) *dynamicWatchAction { action := dynamicWatchAction{ - mgr: mgr, - controller: controller, - watched: map[schema.GroupVersionKind]struct{}{}, + fn: fn, + watched: map[schema.GroupVersionKind]struct{}{}, } for i := range watches { @@ -66,5 +77,10 @@ func newDynamicWatchAction(mgr ctrl.Manager, controller controller.Controller, w action.watches = append(action.watches, watches[i]) } + return &action +} + +func newDynamicWatchAction(fn dynamicWatchFn, watches []watchInput) actions.Fn { + action := newDynamicWatch(fn, watches) return action.run } diff --git a/pkg/controller/reconciler/component_reconciler_actions_test.go b/pkg/controller/reconciler/component_reconciler_actions_test.go new file mode 100644 index 00000000000..4ab20849f94 --- /dev/null +++ b/pkg/controller/reconciler/component_reconciler_actions_test.go @@ -0,0 +1,255 @@ +//nolint:testpackage +package reconciler + +import ( + 
"context" + "testing" + + gomegaTypes "github.com/onsi/gomega/types" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/rs/xid" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/components" + componentsv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" + + . "github.com/onsi/gomega" +) + +func TestDynamicWatchAction_Run(t *testing.T) { + tests := []struct { + name string + object components.ComponentObject + preds []DynamicPredicate + errMatcher gomegaTypes.GomegaMatcher + cntMatcher gomegaTypes.GomegaMatcher + keyMatcher gomegaTypes.GomegaMatcher + }{ + { + name: "should register a watcher if no predicates", + object: &componentsv1.Dashboard{TypeMeta: metav1.TypeMeta{Kind: gvk.Dashboard.Kind}}, + preds: []DynamicPredicate{}, + errMatcher: Not(HaveOccurred()), + cntMatcher: BeNumerically("==", 1), + keyMatcher: HaveKey(gvk.ConfigMap), + }, + + { + name: "should register a watcher when the predicate evaluate to true", + object: &componentsv1.Dashboard{TypeMeta: metav1.TypeMeta{Kind: gvk.Dashboard.Kind}}, + preds: []DynamicPredicate{ + func(_ context.Context, rr *types.ReconciliationRequest) bool { + return true + }, + }, + errMatcher: Not(HaveOccurred()), + cntMatcher: BeNumerically("==", 1), + keyMatcher: HaveKey(gvk.ConfigMap), + }, + + { + name: "should register a watcher when all predicates evaluate to true", + object: &componentsv1.Dashboard{ + TypeMeta: metav1.TypeMeta{ + Kind: gvk.Dashboard.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + ResourceVersion: xid.New().String(), + }, + }, + preds: 
[]DynamicPredicate{ + func(_ context.Context, rr *types.ReconciliationRequest) bool { + return rr.Instance.GetGeneration() > 0 + }, + func(_ context.Context, rr *types.ReconciliationRequest) bool { + return rr.Instance.GetResourceVersion() != "" + }, + }, + errMatcher: Not(HaveOccurred()), + cntMatcher: BeNumerically("==", 1), + keyMatcher: HaveKey(gvk.ConfigMap), + }, + + { + name: "should not register a watcher the predicate returns false", + object: &componentsv1.Dashboard{TypeMeta: metav1.TypeMeta{Kind: gvk.Dashboard.Kind}}, + preds: []DynamicPredicate{ + func(_ context.Context, rr *types.ReconciliationRequest) bool { + return false + }, + }, + errMatcher: Not(HaveOccurred()), + cntMatcher: BeNumerically("==", 0), + keyMatcher: BeEmpty(), + }, + + { + name: "should not register a watcher when a predicate returns false", + object: &componentsv1.Dashboard{ + TypeMeta: metav1.TypeMeta{ + Kind: gvk.Dashboard.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + ResourceVersion: "", + }, + }, + preds: []DynamicPredicate{ + func(_ context.Context, rr *types.ReconciliationRequest) bool { + return rr.Instance.GetGeneration() > 0 + }, + func(_ context.Context, rr *types.ReconciliationRequest) bool { + return rr.Instance.GetResourceVersion() != "" + }, + }, + errMatcher: Not(HaveOccurred()), + cntMatcher: BeNumerically("==", 0), + keyMatcher: BeEmpty(), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + + watches := []watchInput{{ + object: resources.GvkToUnstructured(gvk.ConfigMap), + dynamic: true, + dynamicPred: test.preds, + }} + + mockFn := func(_ client.Object, _ handler.EventHandler, _ ...predicate.Predicate) error { + return nil + } + + DynamicWatchResourcesTotal.Reset() + DynamicWatchResourcesTotal.WithLabelValues("dashboard").Add(0) + + action := newDynamicWatch(mockFn, watches) + err := action.run(ctx, &types.ReconciliationRequest{Instance: test.object}) + + if 
test.errMatcher != nil { + g.Expect(err).To(test.errMatcher) + } + if test.cntMatcher != nil { + g.Expect(testutil.ToFloat64(DynamicWatchResourcesTotal)).To(test.cntMatcher) + } + if test.keyMatcher != nil { + g.Expect(action.watched).Should(test.keyMatcher) + } + }) + } +} + +func TestDynamicWatchAction_Inputs(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + + mockFn := func(_ client.Object, _ handler.EventHandler, _ ...predicate.Predicate) error { + return nil + } + + DynamicWatchResourcesTotal.Reset() + DynamicWatchResourcesTotal.WithLabelValues("dashboard").Add(0) + + watches := []watchInput{ + { + object: resources.GvkToUnstructured(gvk.Secret), + dynamic: true, + dynamicPred: []DynamicPredicate{func(_ context.Context, rr *types.ReconciliationRequest) bool { + return rr.Instance.GetGeneration() == 0 + }}, + }, + { + object: resources.GvkToUnstructured(gvk.ConfigMap), + dynamic: true, + dynamicPred: []DynamicPredicate{func(_ context.Context, rr *types.ReconciliationRequest) bool { + return rr.Instance.GetGeneration() > 0 + }}, + }, + } + + action := newDynamicWatch(mockFn, watches) + err := action.run(ctx, &types.ReconciliationRequest{Instance: &componentsv1.Dashboard{ + TypeMeta: metav1.TypeMeta{ + Kind: gvk.Dashboard.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + }}) + + g.Expect(err). + ShouldNot(HaveOccurred()) + g.Expect(testutil.ToFloat64(DynamicWatchResourcesTotal)). + Should(BeNumerically("==", 1)) + g.Expect(action.watched). 
+ Should(And( + HaveLen(1), + HaveKey(gvk.ConfigMap)), + ) +} + +func TestDynamicWatchAction_NotTwice(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + + mockFn := func(_ client.Object, _ handler.EventHandler, _ ...predicate.Predicate) error { + return nil + } + + DynamicWatchResourcesTotal.Reset() + DynamicWatchResourcesTotal.WithLabelValues("dashboard").Add(0) + + watches := []watchInput{ + { + object: resources.GvkToUnstructured(gvk.ConfigMap), + dynamic: true, + dynamicPred: []DynamicPredicate{func(_ context.Context, rr *types.ReconciliationRequest) bool { + return rr.Instance.GetGeneration() > 0 + }}, + }, + } + + action := newDynamicWatch(mockFn, watches) + + err1 := action.run(ctx, &types.ReconciliationRequest{Instance: &componentsv1.Dashboard{ + TypeMeta: metav1.TypeMeta{ + Kind: gvk.Dashboard.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + }}) + + g.Expect(err1). + ShouldNot(HaveOccurred()) + + err2 := action.run(ctx, &types.ReconciliationRequest{Instance: &componentsv1.Dashboard{ + TypeMeta: metav1.TypeMeta{ + Kind: gvk.Dashboard.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + }}) + + g.Expect(err2). + ShouldNot(HaveOccurred()) + + g.Expect(testutil.ToFloat64(DynamicWatchResourcesTotal)). + Should(BeNumerically("==", 1)) + g.Expect(action.watched). 
+ Should(And( + HaveLen(1), + HaveKey(gvk.ConfigMap)), + ) +} diff --git a/pkg/controller/reconciler/component_reconciler_support.go b/pkg/controller/reconciler/component_reconciler_support.go index 3fc2bafd733..12d21f666e3 100644 --- a/pkg/controller/reconciler/component_reconciler_support.go +++ b/pkg/controller/reconciler/component_reconciler_support.go @@ -13,25 +13,27 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/source" "github.com/opendatahub-io/opendatahub-operator/v2/apis/components" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/handlers" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/component" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/generation" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" ) var ( - // defaultPredicate is the default set of predicates associated to + // DefaultPredicate is the default set of predicates associated to // resources when there is no specific predicate configured via the // builder. // // It would trigger a reconciliation if either the generation or // metadata (labels, annotations) have changed. 
- defaultPredicate = predicate.Or( + DefaultPredicate = predicate.Or( generation.New(), predicate.LabelChangedPredicate{}, predicate.AnnotationChangedPredicate{}, @@ -44,12 +46,15 @@ type forInput struct { gvk schema.GroupVersionKind } +type DynamicPredicate func(context.Context, *types.ReconciliationRequest) bool + type watchInput struct { object client.Object eventHandler handler.EventHandler predicates []predicate.Predicate owned bool dynamic bool + dynamicPred []DynamicPredicate } type WatchOpts func(*watchInput) @@ -72,9 +77,10 @@ func WithEventMapper(value handler.MapFunc) WatchOpts { } } -func Dynamic() WatchOpts { +func Dynamic(predicates ...DynamicPredicate) WatchOpts { return func(a *watchInput) { a.dynamic = true + a.dynamicPred = slices.Clone(predicates) } } @@ -140,7 +146,7 @@ func (b *ComponentReconcilerBuilder) Watches(object client.Object, opts ...Watch if len(in.predicates) == 0 { in.predicates = append(in.predicates, predicate.And( - defaultPredicate, + DefaultPredicate, // use the components.opendatahub.io/part-of label to filter // events not related to the owner type component.ForLabel(labels.ComponentPartOf, strings.ToLower(b.input.gvk.Kind)), @@ -175,7 +181,7 @@ func (b *ComponentReconcilerBuilder) Owns(object client.Object, opts ...WatchOpt } if len(in.predicates) == 0 { - in.predicates = append(in.predicates, defaultPredicate) + in.predicates = append(in.predicates, DefaultPredicate) } b.watches = append(b.watches, in) @@ -264,7 +270,14 @@ func (b *ComponentReconcilerBuilder) Build(_ context.Context) (*ComponentReconci } // internal action - r.AddAction(newDynamicWatchAction(b.mgr, cc, b.watches)) + r.AddAction( + newDynamicWatchAction( + func(obj client.Object, eventHandler handler.EventHandler, predicates ...predicate.Predicate) error { + return cc.Watch(source.Kind(b.mgr.GetCache(), obj), eventHandler, predicates...) 
+ }, + b.watches, + ), + ) return r, nil } diff --git a/pkg/controller/types/types.go b/pkg/controller/types/types.go index bcabd3e4fab..170bf70f4f8 100644 --- a/pkg/controller/types/types.go +++ b/pkg/controller/types/types.go @@ -9,7 +9,6 @@ import ( "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - machineryrt "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/opendatahub-io/opendatahub-operator/v2/apis/components" @@ -95,39 +94,71 @@ type ReconciliationRequest struct { Generated bool } -func (rr *ReconciliationRequest) AddResource(in interface{}) error { - if obj, ok := in.(client.Object); ok { - err := rr.normalize(obj) +// AddResources adds one or more resources to the ReconciliationRequest's Resources slice. +// Each provided client.Object is normalized by ensuring it has the appropriate GVK and is +// converted into an unstructured.Unstructured format before being appended to the list. +func (rr *ReconciliationRequest) AddResources(values ...client.Object) error { + for i := range values { + if values[i] == nil { + continue + } + + err := resources.EnsureGroupVersionKind(rr.Client.Scheme(), values[i]) if err != nil { return fmt.Errorf("cannot normalize object: %w", err) } - } - u, err := machineryrt.DefaultUnstructuredConverter.ToUnstructured(in) - if err != nil { - return err - } + u, err := resources.ToUnstructured(values[i]) + if err != nil { + return fmt.Errorf("cannot convert object to Unstructured: %w", err) + } - rr.Resources = append(rr.Resources, unstructured.Unstructured{Object: u}) + rr.Resources = append(rr.Resources, *u) + } return nil } -func (rr *ReconciliationRequest) normalize(obj client.Object) error { - if obj.GetObjectKind().GroupVersionKind().Kind != "" { - return nil +// ForEachResource iterates over each resource in the ReconciliationRequest's Resources slice, +// invoking the provided function `fn` for each resource. 
The function `fn` takes a pointer to +// an unstructured.Unstructured object and returns a boolean and an error. +// +// The iteration stops early if: +// - `fn` returns an error. +// - `fn` returns `true` as the first return value (`stop`). +func (rr *ReconciliationRequest) ForEachResource(fn func(*unstructured.Unstructured) (bool, error)) error { + for i := range rr.Resources { + stop, err := fn(&rr.Resources[i]) + if err != nil { + return fmt.Errorf("cannot process resource %s: %w", rr.Resources[i].GroupVersionKind(), err) + } + if stop { + break + } } - kinds, _, err := rr.Client.Scheme().ObjectKinds(obj) - if err != nil { - return fmt.Errorf("cannot get kind of resource: %w", err) - } + return nil +} + +// RemoveResources removes resources from the ReconciliationRequest's Resources slice +// based on a provided predicate function. The predicate determines whether a resource +// should be removed. +// +// Parameters: +// - predicate: A function that takes a pointer to an unstructured.Unstructured object +// and returns a boolean indicating whether the resource should be removed. 
+func (rr *ReconciliationRequest) RemoveResources(predicate func(*unstructured.Unstructured) bool) error { + filtered := rr.Resources[:0] // Create a slice with zero length but full capacity + + for i := range rr.Resources { + if predicate(&rr.Resources[i]) { + continue + } - if len(kinds) != 1 { - return fmt.Errorf("expected to find a single GVK for %v, but got %d", obj, len(kinds)) + filtered = append(filtered, rr.Resources[i]) } - obj.GetObjectKind().SetGroupVersionKind(kinds[0]) + rr.Resources = filtered return nil } diff --git a/pkg/controller/types/types_test.go b/pkg/controller/types/types_test.go new file mode 100644 index 00000000000..ef52a8ac882 --- /dev/null +++ b/pkg/controller/types/types_test.go @@ -0,0 +1,122 @@ +package types_test + +import ( + "testing" + + "github.com/rs/xid" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/fakeclient" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + + . 
"github.com/onsi/gomega" +) + +func TestReconciliationRequest_AddResource(t *testing.T) { + g := NewWithT(t) + + cl, err := fakeclient.New() + g.Expect(err).ToNot(HaveOccurred()) + + rr := types.ReconciliationRequest{Client: cl} + + g.Expect(rr.AddResources(&unstructured.Unstructured{})).To(HaveOccurred()) + g.Expect(rr.Resources).To(BeEmpty()) + + g.Expect(rr.AddResources(&corev1.ConfigMap{})).ToNot(HaveOccurred()) + g.Expect(rr.Resources).To(HaveLen(1)) + + g.Expect(rr.AddResources([]client.Object{}...)).ToNot(HaveOccurred()) + g.Expect(rr.Resources).To(HaveLen(1)) +} + +func TestReconciliationRequest_ForEachResource_UpdateSome(t *testing.T) { + g := NewWithT(t) + + cl, err := fakeclient.New() + g.Expect(err).ToNot(HaveOccurred()) + + rr := types.ReconciliationRequest{Client: cl} + g.Expect(rr.AddResources(&corev1.ConfigMap{})).ToNot(HaveOccurred()) + g.Expect(rr.AddResources(&corev1.Secret{})).ToNot(HaveOccurred()) + g.Expect(rr.Resources).To(HaveLen(2)) + + val := xid.New().String() + + g.Expect( + rr.ForEachResource(func(u *unstructured.Unstructured) (bool, error) { + if u.GroupVersionKind() == gvk.ConfigMap { + return false, nil + } + + if err := unstructured.SetNestedField(u.Object, val, "data", "key"); err != nil { + return false, err + } + + return true, nil + }), + ).ToNot(HaveOccurred()) + + g.Expect(rr.Resources).To(HaveLen(2)) + g.Expect(rr.Resources[0].Object).To(jq.Match(`has("data") | not`)) + g.Expect(rr.Resources[1].Object).To(jq.Match(`.data.key == "%s"`, val)) +} + +func TestReconciliationRequest_ForEachResource_UpdateAll(t *testing.T) { + g := NewWithT(t) + + cl, err := fakeclient.New() + g.Expect(err).ToNot(HaveOccurred()) + + rr := types.ReconciliationRequest{Client: cl} + g.Expect(rr.AddResources(&corev1.ConfigMap{})).ToNot(HaveOccurred()) + g.Expect(rr.AddResources(&corev1.Secret{})).ToNot(HaveOccurred()) + g.Expect(rr.Resources).To(HaveLen(2)) + + val := xid.New().String() + + g.Expect( + rr.ForEachResource(func(u 
*unstructured.Unstructured) (bool, error) { + if err := unstructured.SetNestedField(u.Object, val, "data", "key"); err != nil { + return false, err + } + + return false, nil + }), + ).ToNot(HaveOccurred()) + + g.Expect(rr.Resources).To(And( + HaveLen(2), + HaveEach(jq.Match(`.data.key == "%s"`, val)), + )) +} + +func TestReconciliationRequest_RemoveResources(t *testing.T) { + g := NewWithT(t) + + cl, err := fakeclient.New() + g.Expect(err).ToNot(HaveOccurred()) + + // Create a ReconciliationRequest with some resources + rr := types.ReconciliationRequest{Client: cl} + + err = rr.AddResources(&corev1.ConfigMap{}, &corev1.Secret{}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(rr.Resources).To(HaveLen(2)) + + // Remove all ConfigMaps using the predicate function + err = rr.RemoveResources(func(u *unstructured.Unstructured) bool { + return u.GroupVersionKind() == gvk.ConfigMap + }) + + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(rr.Resources).To(And( + HaveLen(1), + HaveEach(jq.Match(`.kind == "%s"`, gvk.Secret.Kind)), + )) +} diff --git a/pkg/resources/resources.go b/pkg/resources/resources.go index 1c0cde96b88..7f3aa2f315b 100644 --- a/pkg/resources/resources.go +++ b/pkg/resources/resources.go @@ -242,3 +242,22 @@ func KindForObject(scheme *runtime.Scheme, obj runtime.Object) (string, error) { return gvk.Kind, nil } + +func EnsureGroupVersionKind(s *runtime.Scheme, obj client.Object) error { + if obj.GetObjectKind().GroupVersionKind().Kind != "" { + return nil + } + + kinds, _, err := s.ObjectKinds(obj) + if err != nil { + return fmt.Errorf("cannot get kind of resource: %w", err) + } + + if len(kinds) != 1 { + return fmt.Errorf("expected to find a single GVK for %v, but got %d", obj, len(kinds)) + } + + obj.GetObjectKind().SetGroupVersionKind(kinds[0]) + + return nil +} diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go index f6d92d7e9f0..86aa52a889e 100644 --- a/pkg/upgrade/upgrade.go +++ b/pkg/upgrade/upgrade.go @@ -32,7 +32,6 @@ import ( 
featuresv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/infrastructure/v1" componentsold "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/components/kserve" "github.com/opendatahub-io/opendatahub-operator/v2/components/modelmeshserving" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" @@ -73,8 +72,8 @@ func CreateDefaultDSC(ctx context.Context, cli client.Client) error { DataSciencePipelines: componentsv1.DSCDataSciencePipelines{ ManagementSpec: components.ManagementSpec{ManagementState: operatorv1.Managed}, }, - Kserve: kserve.Kserve{ - Component: componentsold.Component{ManagementState: operatorv1.Managed}, + Kserve: componentsv1.DSCKserve{ + ManagementSpec: components.ManagementSpec{ManagementState: operatorv1.Managed}, }, CodeFlare: componentsv1.DSCCodeFlare{ ManagementSpec: components.ManagementSpec{ManagementState: operatorv1.Managed}, diff --git a/tests/e2e/controller_test.go b/tests/e2e/controller_test.go index e12ffc7ef4b..c2a95c88ef2 100644 --- a/tests/e2e/controller_test.go +++ b/tests/e2e/controller_test.go @@ -48,6 +48,7 @@ var ( "datasciencepipelienes": dataSciencePipelinesTestSuite, "codeflare": codeflareTestSuite, "workbenches": workbenchesTestSuite, + "kserve": kserveTestSuite, } ) diff --git a/tests/e2e/creation_test.go b/tests/e2e/creation_test.go index e9befa1e0c3..96e208c14c1 100644 --- a/tests/e2e/creation_test.go +++ b/tests/e2e/creation_test.go @@ -70,19 +70,13 @@ func creationTestSuite(t *testing.T) { }) } - // // Kserve - // t.Run("Validate Knative resoruce", func(t *testing.T) { - // err = testCtx.validateDSC() - // require.NoError(t, err, "error getting Knatvie resrouce as part of DataScienceCluster validation") - // }) - // t.Run("Validate default certs available", func(t *testing.T) { - // // move it 
to be part of check with kserve since it is using serving's secret - // err = testCtx.testDefaultCertsAvailable() - // require.NoError(t, err, "error getting default cert secrets for Kserve") - // }) - // - // ModelReg + // Kserve + t.Run("Validate Knative resource", func(t *testing.T) { + err = testCtx.validateDSC() + require.NoError(t, err, "error getting Knative resource as part of DataScienceCluster validation") + }) + // ModelReg if testCtx.testOpts.webhookTest { t.Run("Validate model registry config", func(t *testing.T) { err = testCtx.validateModelRegistryConfig() diff --git a/tests/e2e/helper_test.go b/tests/e2e/helper_test.go index 1f2bbb384ba..bf19457cf8a 100644 --- a/tests/e2e/helper_test.go +++ b/tests/e2e/helper_test.go @@ -27,7 +27,6 @@ import ( dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/infrastructure/v1" componentsold "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/components/kserve" "github.com/opendatahub-io/opendatahub-operator/v2/components/modelmeshserving" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/modelregistry" ) @@ -133,12 +132,20 @@ func setupDSCInstance(name string) *dscv1.DataScienceCluster { ManagementState: operatorv1.Managed, }, }, - Kserve: kserve.Kserve{ - Component: componentsold.Component{ - ManagementState: operatorv1.Removed, + Kserve: componentsv1.DSCKserve{ + ManagementSpec: components.ManagementSpec{ + ManagementState: operatorv1.Managed, }, - Serving: infrav1.ServingSpec{ - ManagementState: operatorv1.Removed, + KserveCommonSpec: componentsv1.KserveCommonSpec{ + Serving: infrav1.ServingSpec{ + ManagementState: operatorv1.Managed, + Name: "knative-serving", + IngressGateway: infrav1.GatewaySpec{ + Certificate: infrav1.CertificateSpec{ + Type: infrav1.OpenshiftDefaultIngress, + }, + }, + }, }, }, CodeFlare: 
componentsv1.DSCCodeFlare{ diff --git a/tests/e2e/kserve_test.go b/tests/e2e/kserve_test.go new file mode 100644 index 00000000000..73abc75ad56 --- /dev/null +++ b/tests/e2e/kserve_test.go @@ -0,0 +1,283 @@ +package e2e_test + +import ( + "strings" + "testing" + "time" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/stretchr/testify/require" + autoscalingv1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + k8slabels "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + componentsv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + + . 
"github.com/onsi/gomega" +) + +type KserveTestCtx struct { + *testContext +} + +func kserveTestSuite(t *testing.T) { + t.Helper() + + tc, err := NewTestContext() + require.NoError(t, err) + + componentCtx := KserveTestCtx{ + testContext: tc, + } + + t.Run(componentCtx.testDsc.Name, func(t *testing.T) { + t.Run("Validate Kserve instance", componentCtx.validateKserveInstance) + t.Run("Validate default certs available", componentCtx.validateDefaultCertsAvailable) + t.Run("Validate Kserve operands OwnerReferences", componentCtx.validateOperandsOwnerReferences) + t.Run("Validate Update Kserve operands resources", componentCtx.validateUpdateKserveOperandsResources) + // must be the latest one + t.Run("Validate Disabling Kserve Component", componentCtx.validateKserveDisabled) + }) +} + +func (k *KserveTestCtx) validateKserveInstance(t *testing.T) { + g := k.WithT(t) + + g.Eventually( + k.List(gvk.Kserve), + ).Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.metadata.ownerReferences[0].kind == "%s"`, gvk.DataScienceCluster.Kind), + jq.Match(`.spec.serving.name == "%s"`, k.testDsc.Spec.Components.Kserve.Serving.Name), + jq.Match(`.spec.serving.managementState == "%s"`, k.testDsc.Spec.Components.Kserve.Serving.ManagementState), + jq.Match(`.spec.serving.ingressGateway.certificate.type == "%s"`, + k.testDsc.Spec.Components.Kserve.Serving.IngressGateway.Certificate.Type), + + jq.Match(`.status.phase == "%s"`, readyStatus), + )), + )) +} + +func (k *KserveTestCtx) validateDefaultCertsAvailable(t *testing.T) { + err := k.testDefaultCertsAvailable() + require.NoError(t, err, "error getting default cert secrets for Kserve") +} + +func (k *KserveTestCtx) validateOperandsOwnerReferences(t *testing.T) { + g := k.WithT(t) + + g.Eventually( + k.List( + gvk.Deployment, + client.InNamespace(k.applicationsNamespace), + client.MatchingLabels{labels.ComponentPartOf: strings.ToLower(componentsv1.KserveKind)}, + ), + ).Should(And( + HaveLen(2), + HaveEach( + 
jq.Match(`.metadata.ownerReferences[0].kind == "%s"`, componentsv1.KserveKind), + ), + )) +} + +func (k *KserveTestCtx) validateUpdateKserveOperandsResources(t *testing.T) { + g := k.WithT(t) + + matchLabels := map[string]string{ + "control-plane": "kserve-controller-manager", + labels.ComponentPartOf: strings.ToLower(componentsv1.KserveKind), + } + + listOpts := []client.ListOption{ + client.MatchingLabels(matchLabels), + client.InNamespace(k.applicationsNamespace), + } + + appDeployments, err := k.kubeClient.AppsV1().Deployments(k.applicationsNamespace).List( + k.ctx, + metav1.ListOptions{ + LabelSelector: k8slabels.SelectorFromSet(matchLabels).String(), + }, + ) + + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(appDeployments.Items).To(HaveLen(1)) + + const expectedReplica int32 = 2 // from 1 to 2 + + testDeployment := appDeployments.Items[0] + patchedReplica := &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Name: testDeployment.Name, + Namespace: testDeployment.Namespace, + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: expectedReplica, + }, + Status: autoscalingv1.ScaleStatus{}, + } + + updatedDep, err := k.kubeClient.AppsV1().Deployments(k.applicationsNamespace).UpdateScale( + k.ctx, + testDeployment.Name, + patchedReplica, + metav1.UpdateOptions{}, + ) + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(updatedDep.Spec.Replicas).Should(Equal(patchedReplica.Spec.Replicas)) + + g.Eventually( + k.List( + gvk.Deployment, + listOpts..., + ), + ).Should(And( + HaveLen(1), + HaveEach( + jq.Match(`.spec.replicas == %d`, expectedReplica), + ), + )) + + g.Consistently( + k.List( + gvk.Deployment, + listOpts..., + ), + ).WithTimeout(30 * time.Second).WithPolling(1 * time.Second).Should(And( + HaveLen(1), + HaveEach( + jq.Match(`.spec.replicas == %d`, expectedReplica), + ), + )) +} + +func (k *KserveTestCtx) validateKserveDisabled(t *testing.T) { + g := k.WithT(t) + + g.Eventually( + k.List( + gvk.Deployment, + client.InNamespace(k.applicationsNamespace), 
+ client.MatchingLabels{labels.ComponentPartOf: strings.ToLower(componentsv1.KserveKind)}, + ), + ).Should( + HaveLen(2), + ) + + g.Eventually( + k.updateComponent(func(c *dscv1.Components) { + c.Kserve.ManagementState = operatorv1.Removed + }), + ).ShouldNot( + HaveOccurred(), + ) + + g.Eventually( + k.List( + gvk.Deployment, + client.InNamespace(k.applicationsNamespace), + client.MatchingLabels{labels.ComponentPartOf: strings.ToLower(componentsv1.KserveKind)}, + ), + ).Should( + BeEmpty(), + ) + + g.Eventually( + k.List(gvk.Kserve), + ).Should( + BeEmpty(), + ) +} + +func (k *KserveTestCtx) WithT(t *testing.T) *WithT { + t.Helper() + + g := NewWithT(t) + g.SetDefaultEventuallyTimeout(generalWaitTimeout) + g.SetDefaultEventuallyPollingInterval(1 * time.Second) + + return g +} + +func (k *KserveTestCtx) List( + gvk schema.GroupVersionKind, + option ...client.ListOption, +) func() ([]unstructured.Unstructured, error) { + return func() ([]unstructured.Unstructured, error) { + items := unstructured.UnstructuredList{} + items.SetGroupVersionKind(gvk) + + err := k.customClient.List(k.ctx, &items, option...) + if err != nil { + return nil, err + } + + return items.Items, nil + } +} + +func (k *KserveTestCtx) Get( + gvk schema.GroupVersionKind, + ns string, + name string, + option ...client.GetOption, +) func() (*unstructured.Unstructured, error) { + return func() (*unstructured.Unstructured, error) { + u := unstructured.Unstructured{} + u.SetGroupVersionKind(gvk) + + err := k.customClient.Get(k.ctx, client.ObjectKey{Namespace: ns, Name: name}, &u, option...) 
+ if err != nil { + return nil, err + } + + return &u, nil + } +} +func (k *KserveTestCtx) MergePatch( + obj client.Object, + patch []byte, +) func() (*unstructured.Unstructured, error) { + return func() (*unstructured.Unstructured, error) { + u, err := resources.ToUnstructured(obj) + if err != nil { + return nil, err + } + + err = k.customClient.Patch(k.ctx, u, client.RawPatch(types.MergePatchType, patch)) + if err != nil { + return nil, err + } + + return u, nil + } +} + +func (k *KserveTestCtx) updateComponent(fn func(dsc *dscv1.Components)) func() error { + return func() error { + err := k.customClient.Get(k.ctx, types.NamespacedName{Name: k.testDsc.Name}, k.testDsc) + if err != nil { + return err + } + + fn(&k.testDsc.Spec.Components) + + err = k.customClient.Update(k.ctx, k.testDsc) + if err != nil { + return err + } + + return nil + } +} diff --git a/tests/e2e/odh_manager_test.go b/tests/e2e/odh_manager_test.go index 3ce2ada6dde..559baf5cda0 100644 --- a/tests/e2e/odh_manager_test.go +++ b/tests/e2e/odh_manager_test.go @@ -86,4 +86,10 @@ func (tc *testContext) validateOwnedCRDs(t *testing.T) { require.NoErrorf(t, tc.validateCRD("workbenches.components.opendatahub.io"), "error in validating CRD : workbenches.components.opendatahub.io") }) + + t.Run("Validate Kserve CRD", func(t *testing.T) { + t.Parallel() + require.NoErrorf(t, tc.validateCRD("kserves.components.opendatahub.io"), + "error in validating CRD : kserves.components.opendatahub.io") + }) } diff --git a/tests/e2e/ray_test.go b/tests/e2e/ray_test.go index cd62819e2fe..2f4ffed19ed 100644 --- a/tests/e2e/ray_test.go +++ b/tests/e2e/ray_test.go @@ -50,7 +50,7 @@ func rayTestSuite(t *testing.T) { require.NoError(t, err, "error validating Ray instance") }) - t.Run("Validate Ownerrefrences exist", func(t *testing.T) { + t.Run("Validate Ownerreferences exist", func(t *testing.T) { err = rayCtx.testOwnerReferences() require.NoError(t, err, "error getting all Ray's Ownerrefrences") }) diff --git 
a/tests/integration/features/serverless_feature_test.go b/tests/integration/features/serverless_feature_test.go index 7f116479592..39843aa06fe 100644 --- a/tests/integration/features/serverless_feature_test.go +++ b/tests/integration/features/serverless_feature_test.go @@ -13,9 +13,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" + componentsv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/infrastructure/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components/kserve" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/serverless" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" @@ -31,7 +31,7 @@ var _ = Describe("Serverless feature", func() { var ( dsci *dsciv1.DSCInitialization objectCleaner *envtestutil.Cleaner - kserveComponent *kserve.Kserve + kserveComponent *componentsv1.Kserve ) BeforeEach(func(ctx context.Context) { @@ -43,7 +43,7 @@ var _ = Describe("Serverless feature", func() { namespace := envtestutil.AppendRandomNameTo("ns-serverless") dsciName := envtestutil.AppendRandomNameTo("dsci-serverless") dsci = fixtures.NewDSCInitialization(ctx, envTestClient, dsciName, namespace) - kserveComponent = &kserve.Kserve{} + kserveComponent = &componentsv1.Kserve{} }) Context("verifying preconditions", func() { @@ -63,7 +63,7 @@ var _ = Describe("Serverless feature", func() { return nil } - featuresHandler := feature.ComponentFeaturesHandler(dsci, kserveComponent.GetComponentName(), dsci.Spec.ApplicationsNamespace, featuresProvider) + featuresHandler := feature.ComponentFeaturesHandler(dsci, componentsv1.KserveComponentName, dsci.Spec.ApplicationsNamespace, featuresProvider) // when applyErr := featuresHandler.Apply(ctx, 
envTestClient) @@ -111,7 +111,7 @@ var _ = Describe("Serverless feature", func() { return nil } - featuresHandler := feature.ComponentFeaturesHandler(dsci, kserveComponent.GetComponentName(), dsci.Spec.ApplicationsNamespace, featuresProvider) + featuresHandler := feature.ComponentFeaturesHandler(dsci, componentsv1.KserveComponentName, dsci.Spec.ApplicationsNamespace, featuresProvider) // then Expect(featuresHandler.Apply(ctx, envTestClient)).To(Succeed()) @@ -130,7 +130,7 @@ var _ = Describe("Serverless feature", func() { return nil } - featuresHandler := feature.ComponentFeaturesHandler(dsci, kserveComponent.GetComponentName(), dsci.Spec.ApplicationsNamespace, featuresProvider) + featuresHandler := feature.ComponentFeaturesHandler(dsci, componentsv1.KserveComponentName, dsci.Spec.ApplicationsNamespace, featuresProvider) // then Expect(featuresHandler.Apply(ctx, envTestClient)).To(Succeed()) @@ -160,7 +160,7 @@ var _ = Describe("Serverless feature", func() { return nil } - featuresHandler := feature.ComponentFeaturesHandler(dsci, kserveComponent.GetComponentName(), dsci.Spec.ApplicationsNamespace, featuresProvider) + featuresHandler := feature.ComponentFeaturesHandler(dsci, componentsv1.KserveComponentName, dsci.Spec.ApplicationsNamespace, featuresProvider) // then Expect(featuresHandler.Apply(ctx, envTestClient)).ToNot(Succeed()) @@ -271,17 +271,17 @@ var _ = Describe("Serverless feature", func() { It("should create a TLS secret if certificate is SelfSigned", func(ctx context.Context) { // given - kserveComponent.Serving.IngressGateway.Certificate.Type = infrav1.SelfSigned - kserveComponent.Serving.IngressGateway.Domain = fixtures.TestDomainFooCom + kserveComponent.Spec.Serving.IngressGateway.Certificate.Type = infrav1.SelfSigned + kserveComponent.Spec.Serving.IngressGateway.Domain = fixtures.TestDomainFooCom featuresProvider := func(registry feature.FeaturesRegistry) error { errFeatureAdd := registry.Add( feature.Define("tls-secret-creation"). 
WithData( servicemesh.FeatureData.ControlPlane.Define(&dsci.Spec).AsAction(), - serverless.FeatureData.Serving.Define(&kserveComponent.Serving).AsAction(), - serverless.FeatureData.IngressDomain.Define(&kserveComponent.Serving).AsAction(), - serverless.FeatureData.CertificateName.Define(&kserveComponent.Serving).AsAction(), + serverless.FeatureData.Serving.Define(&kserveComponent.Spec.Serving).AsAction(), + serverless.FeatureData.IngressDomain.Define(&kserveComponent.Spec.Serving).AsAction(), + serverless.FeatureData.CertificateName.Define(&kserveComponent.Spec.Serving).AsAction(), ). WithResources(serverless.ServingCertificateResource), ) @@ -291,7 +291,7 @@ var _ = Describe("Serverless feature", func() { return nil } - featuresHandler := feature.ComponentFeaturesHandler(dsci, kserveComponent.GetComponentName(), dsci.Spec.ApplicationsNamespace, featuresProvider) + featuresHandler := feature.ComponentFeaturesHandler(dsci, componentsv1.KserveComponentName, dsci.Spec.ApplicationsNamespace, featuresProvider) // when Expect(featuresHandler.Apply(ctx, envTestClient)).To(Succeed()) @@ -313,17 +313,17 @@ var _ = Describe("Serverless feature", func() { It("should not create any TLS secret if certificate is user provided", func(ctx context.Context) { // given - kserveComponent.Serving.IngressGateway.Certificate.Type = infrav1.Provided - kserveComponent.Serving.IngressGateway.Domain = fixtures.TestDomainFooCom + kserveComponent.Spec.Serving.IngressGateway.Certificate.Type = infrav1.Provided + kserveComponent.Spec.Serving.IngressGateway.Domain = fixtures.TestDomainFooCom featuresProvider := func(registry feature.FeaturesRegistry) error { errFeatureAdd := registry.Add( feature.Define("tls-secret-creation"). 
WithData( servicemesh.FeatureData.ControlPlane.Define(&dsci.Spec).AsAction(), - serverless.FeatureData.Serving.Define(&kserveComponent.Serving).AsAction(), - serverless.FeatureData.IngressDomain.Define(&kserveComponent.Serving).AsAction(), - serverless.FeatureData.CertificateName.Define(&kserveComponent.Serving).AsAction(), + serverless.FeatureData.Serving.Define(&kserveComponent.Spec.Serving).AsAction(), + serverless.FeatureData.IngressDomain.Define(&kserveComponent.Spec.Serving).AsAction(), + serverless.FeatureData.CertificateName.Define(&kserveComponent.Spec.Serving).AsAction(), ). WithResources(serverless.ServingCertificateResource), ) @@ -333,7 +333,7 @@ var _ = Describe("Serverless feature", func() { return nil } - featuresHandler := feature.ComponentFeaturesHandler(dsci, kserveComponent.GetComponentName(), dsci.Spec.ApplicationsNamespace, featuresProvider) + featuresHandler := feature.ComponentFeaturesHandler(dsci, componentsv1.KserveComponentName, dsci.Spec.ApplicationsNamespace, featuresProvider) // when Expect(featuresHandler.Apply(ctx, envTestClient)).To(Succeed())