[Feature] Kuberay RayJob MultiKueue adapter #3892
base: main
Changes from all commits: 5af837a, 1f76919, b0dbe2d, fbc69e0, 1801ddc
```diff
@@ -40,6 +40,14 @@ if [[ -n ${KUBEFLOW_MPI_VERSION:-} ]]; then
   export KUBEFLOW_MPI_IMAGE=mpioperator/mpi-operator:${KUBEFLOW_MPI_VERSION/#v}
 fi
 
+if [[ -n ${KUBERAY_VERSION:-} ]]; then
+  export KUBERAY_MANIFEST="${ROOT_DIR}/dep-crds/ray-operator/default/"
+  export KUBERAY_IMAGE=bitnami/kuberay-operator:${KUBERAY_VERSION/#v}
+  export KUBERAY_RAY_IMAGE=rayproject/ray:2.9.0
+  export KUBERAY_RAY_IMAGE_ARM=rayproject/ray:2.9.0-aarch64
+  export KUBERAY_CRDS=${ROOT_DIR}/dep-crds/ray-operator/crd/bases
+fi
+
 # sleep image to use for testing.
 export E2E_TEST_SLEEP_IMAGE_OLD=gcr.io/k8s-staging-perf-tests/sleep:v0.0.3@sha256:00ae8e01dd4439edfb7eb9f1960ac28eba16e952956320cce7f2ac08e3446e6b
 E2E_TEST_SLEEP_IMAGE_OLD_WITHOUT_SHA=${E2E_TEST_SLEEP_IMAGE_OLD%%@*}
```

Review comment (on KUBERAY_RAY_IMAGE_ARM): this one is for those of us working on macOS - it's vital for development, not so much for prod. I think it should stay.
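The `${KUBERAY_VERSION/#v}` expansion strips a leading `v` from the version tag so it matches the image tag format. A minimal sketch of the behavior:

```bash
# ${VAR/#pattern} removes 'pattern' only when it matches at the start of $VAR.
KUBERAY_VERSION=v1.2.2
echo "${KUBERAY_VERSION/#v}"   # -> 1.2.2
KUBERAY_VERSION=1.2.2
echo "${KUBERAY_VERSION/#v}"   # -> 1.2.2 (no leading v, unchanged)
```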
```diff
@@ -89,6 +97,17 @@ function prepare_docker_images {
   if [[ -n ${KUBEFLOW_MPI_VERSION:-} ]]; then
     docker pull "${KUBEFLOW_MPI_IMAGE}"
   fi
+  if [[ -n ${KUBERAY_VERSION:-} ]]; then
+    docker pull "${KUBERAY_IMAGE}"
+
+    # Extra e2e images required for Kuberay
+    unamestr=$(uname)
+    if [[ "$unamestr" == 'Linux' ]]; then
+      docker pull "${KUBERAY_RAY_IMAGE}"
+    elif [[ "$unamestr" == 'Darwin' ]]; then
+      docker pull "${KUBERAY_RAY_IMAGE_ARM}"
+    fi
+  fi
 }
 
 # $1 cluster
```
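Worth noting in review: the `uname` check equates Darwin with Apple Silicon and Linux with amd64. A variant keyed on machine architecture instead (a sketch, not part of this PR) would also cover Intel Macs and Linux/arm64 runners:

```bash
# Sketch: pick the Ray image by CPU architecture rather than by OS.
arch=$(uname -m)
if [[ "$arch" == 'arm64' || "$arch" == 'aarch64' ]]; then
  docker pull "${KUBERAY_RAY_IMAGE_ARM}"
else
  docker pull "${KUBERAY_RAY_IMAGE}"
fi
```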
```diff
@@ -136,6 +155,22 @@ function install_mpi {
   kubectl apply --server-side -f "${KUBEFLOW_MPI_MANIFEST}"
 }
 
+# $1 - cluster name
+function install_kuberay {
+  # Extra e2e images required for Kuberay
+  unamestr=$(uname)
+  if [[ "$unamestr" == 'Linux' ]]; then
+    cluster_kind_load_image "${1}" "${KUBERAY_RAY_IMAGE}"
+  elif [[ "$unamestr" == 'Darwin' ]]; then
+    cluster_kind_load_image "${1}" "${KUBERAY_RAY_IMAGE_ARM}"
+  fi
+
+  cluster_kind_load_image "${1}" "${KUBERAY_IMAGE}"
+  kubectl config use-context "kind-${1}"
+  # create used instead of apply - https://github.com/ray-project/kuberay/issues/504
+  kubectl create -k "${KUBERAY_MANIFEST}"
+}
+
 INITIAL_IMAGE=$($YQ '.images[] | select(.name == "controller") | [.newName, .newTag] | join(":")' config/components/manager/kustomization.yaml)
 export INITIAL_IMAGE
```
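The `kubectl create -k` workaround exists because the Ray CRDs are too large for client-side `kubectl apply`, which stores the full manifest in the `last-applied-configuration` annotation (the linked kuberay#504). Since the script already uses server-side apply for the MPI operator, an idempotent alternative (a sketch, untested here) would be:

```bash
# Server-side apply skips the client-side last-applied-configuration
# annotation, so it tolerates very large CRDs and, unlike `create`,
# can be re-run without erroring on existing objects.
kubectl apply --server-side -k "${KUBERAY_MANIFEST}"
```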
```diff
@@ -26,6 +26,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -55,12 +56,13 @@ func init() {
 		JobType:                &rayv1.RayJob{},
 		AddToScheme:            rayv1.AddToScheme,
 		IsManagingObjectsOwner: isRayJob,
+		MultiKueueAdapter:      &multikueueAdapter{},
 	}))
 }
 
 // +kubebuilder:rbac:groups="",resources=events,verbs=create;watch;update
 // +kubebuilder:rbac:groups=ray.io,resources=rayjobs,verbs=get;list;watch;update;patch
-// +kubebuilder:rbac:groups=ray.io,resources=rayjobs/status,verbs=get;update
+// +kubebuilder:rbac:groups=ray.io,resources=rayjobs/status,verbs=get;update;patch
 // +kubebuilder:rbac:groups=ray.io,resources=rayjobs/finalizers,verbs=get;update
 // +kubebuilder:rbac:groups=kueue.x-k8s.io,resources=workloads,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=kueue.x-k8s.io,resources=workloads/status,verbs=get;update;patch
```
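The added `patch` verb on `rayjobs/status` is what lets the adapter copy the remote status back to the local object via a status patch. For reference, the amended marker makes controller-gen emit a ClusterRole rule along these lines:

```yaml
# Rule generated from the updated kubebuilder marker (shape shown for illustration).
- apiGroups: ["ray.io"]
  resources: ["rayjobs/status"]
  verbs: ["get", "update", "patch"]
```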
```diff
@@ -82,12 +84,16 @@ func (j *RayJob) Object() client.Object {
 	return (*rayv1.RayJob)(j)
 }
 
+func fromObject(obj runtime.Object) *RayJob {
+	return (*RayJob)(obj.(*rayv1.RayJob))
+}
+
 func (j *RayJob) IsSuspended() bool {
 	return j.Spec.Suspend
 }
 
 func (j *RayJob) IsActive() bool {
-	return j.Status.JobDeploymentStatus != rayv1.JobDeploymentStatusSuspended
+	return (j.Status.JobDeploymentStatus != rayv1.JobDeploymentStatusSuspended) && (j.Status.JobDeploymentStatus != rayv1.JobDeploymentStatusNew)
 }
 
 func (j *RayJob) Suspend() {
```

Review thread (on the IsActive change):
- Reviewer: Why is this change needed? Maybe this is a fix for regular RayJobs too? In that case we should do it in a separate PR so that it can be cherry-picked.
- Author: Yes, I believe it's a needed fix.
- Reviewer: Interesting, maybe it is a race condition? Please open a separate PR; we may consider cherry-picking the fix then.
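A minimal table test for the new IsActive semantics (a sketch that assumes it sits in the same rayjob package, so it can use the exported RayJob alias directly):

```go
package rayjob

import (
	"testing"

	rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1"
)

// TestIsActive checks that both the Suspended and the New deployment
// statuses are treated as "not active", per the amended IsActive above.
func TestIsActive(t *testing.T) {
	cases := map[rayv1.JobDeploymentStatus]bool{
		rayv1.JobDeploymentStatusNew:       false, // newly created, nothing running yet
		rayv1.JobDeploymentStatusSuspended: false,
		rayv1.JobDeploymentStatusRunning:   true,
	}
	for status, want := range cases {
		job := &RayJob{Status: rayv1.RayJobStatus{JobDeploymentStatus: status}}
		if got := job.IsActive(); got != want {
			t.Errorf("IsActive() with status %q = %v, want %v", status, got, want)
		}
	}
}
```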
@@ -0,0 +1,123 @@ (new file)

```go
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package rayjob

import (
	"context"
	"errors"
	"fmt"

	rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
	"sigs.k8s.io/kueue/pkg/controller/constants"
	"sigs.k8s.io/kueue/pkg/controller/jobframework"
	"sigs.k8s.io/kueue/pkg/util/api"
	clientutil "sigs.k8s.io/kueue/pkg/util/client"
)

type multikueueAdapter struct{}

var _ jobframework.MultiKueueAdapter = (*multikueueAdapter)(nil)

func (b *multikueueAdapter) SyncJob(ctx context.Context, localClient client.Client, remoteClient client.Client, key types.NamespacedName, workloadName, origin string) error {
	log := ctrl.LoggerFrom(ctx)

	localJob := rayv1.RayJob{}
	err := localClient.Get(ctx, key, &localJob)
	if err != nil {
		return err
	}

	remoteJob := rayv1.RayJob{}
	err = remoteClient.Get(ctx, key, &remoteJob)
	if client.IgnoreNotFound(err) != nil {
		return err
	}

	// if the remote exists, just copy the status
	if err == nil {
		if fromObject(&localJob).IsSuspended() {
			// Ensure the job is unsuspended before updating its status; otherwise, it will fail when patching the spec.
			log.V(2).Info("Skipping the sync since the local job is still suspended")
			return nil
		}
		return clientutil.PatchStatus(ctx, localClient, &localJob, func() (bool, error) {
			localJob.Status = remoteJob.Status
			return true, nil
		})
	}

	remoteJob = rayv1.RayJob{
		ObjectMeta: api.CloneObjectMetaForCreation(&localJob.ObjectMeta),
		Spec:       *localJob.Spec.DeepCopy(),
	}

	// add the prebuilt workload
	if remoteJob.Labels == nil {
		remoteJob.Labels = make(map[string]string, 2)
	}
	remoteJob.Labels[constants.PrebuiltWorkloadLabel] = workloadName
	remoteJob.Labels[kueue.MultiKueueOriginLabel] = origin

	return remoteClient.Create(ctx, &remoteJob)
}

func (b *multikueueAdapter) DeleteRemoteObject(ctx context.Context, remoteClient client.Client, key types.NamespacedName) error {
	job := rayv1.RayJob{}
	job.SetName(key.Name)
	job.SetNamespace(key.Namespace)
	return client.IgnoreNotFound(remoteClient.Delete(ctx, &job))
}

func (b *multikueueAdapter) KeepAdmissionCheckPending() bool {
	return false
}

func (b *multikueueAdapter) IsJobManagedByKueue(ctx context.Context, c client.Client, key types.NamespacedName) (bool, string, error) {
	return true, "", nil
}

func (b *multikueueAdapter) GVK() schema.GroupVersionKind {
	return gvk
}

var _ jobframework.MultiKueueWatcher = (*multikueueAdapter)(nil)

func (*multikueueAdapter) GetEmptyList() client.ObjectList {
	return &rayv1.RayJobList{}
}

func (*multikueueAdapter) WorkloadKeyFor(o runtime.Object) (types.NamespacedName, error) {
	job, isJob := o.(*rayv1.RayJob)
	if !isJob {
		return types.NamespacedName{}, errors.New("not a rayjob")
	}

	prebuiltWl, hasPrebuiltWorkload := job.Labels[constants.PrebuiltWorkloadLabel]
	if !hasPrebuiltWorkload {
		return types.NamespacedName{}, fmt.Errorf("no prebuilt workload found for rayjob: %s", klog.KObj(job))
	}

	return types.NamespacedName{Name: prebuiltWl, Namespace: job.Namespace}, nil
}
```
Review comment: So far it's the version without managedBy. We don't have a ray-operator image that supports managedBy; that would have to be a custom one... latest -> 1.2.2, which is the last release.
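For context, once a KubeRay release ships a managedBy field, MultiKueue dispatch could rely on a spec like the following (field name and controller value assumed from the batch/v1 Job integration; verify against the KubeRay API before relying on it):

```yaml
# Hypothetical RayJob using managedBy (assumes a KubeRay release with the field).
apiVersion: ray.io/v1
kind: RayJob
metadata:
  name: rayjob-sample
spec:
  managedBy: kueue.x-k8s.io/multikueue  # hands reconciliation over to MultiKueue on the hub
  # ...
```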