vendor: Bump k8s to 0.31.0 (kubevirt#1964)
* cluster: Bump kubevirtci tag, refactor install function

Bump to latest kubevirtci tag
Refactor cluster::install(), replacing the single-bracket "-o" test syntax with a "[[ ... || ... ]]" compound conditional.

Signed-off-by: Ram Lavi <ralavi@redhat.com>

* vendor: Bump k8s to 0.31.0

Other significant bumps that were needed:
- controller-runtime to v0.19.3
- kubevirt to v1.4.0
- prometheus-operator to v0.68.0

Updated the relevant functions accordingly.

Signed-off-by: Ram Lavi <ralavi@redhat.com>

---------

Signed-off-by: Ram Lavi <ralavi@redhat.com>
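
The full go.mod and go.sum diffs are too large to render below, so here is a hedged sketch of the direct requires implied by the commit message; the exact module list (and any indirect bumps) is an assumption, not taken from the rendered diff.

```
// go.mod (excerpt, assumed from the commit message)
require (
	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.68.0
	k8s.io/api v0.31.0
	k8s.io/apimachinery v0.31.0
	k8s.io/client-go v0.31.0
	kubevirt.io/api v1.4.0
	kubevirt.io/kubevirt v1.4.0
	sigs.k8s.io/controller-runtime v0.19.3
)
```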
RamLavi committed Jan 9, 2025
1 parent caaeca8 commit 1af6e7d
Showing 4,507 changed files with 391,405 additions and 176,979 deletions.
The diff you're trying to view is too large. We only load the first 3000 changed files.
5 changes: 2 additions & 3 deletions cluster/cluster.sh
@@ -13,7 +13,7 @@
# limitations under the License.

export KUBEVIRT_PROVIDER=${KUBEVIRT_PROVIDER:-'k8s-1.30'}
export KUBEVIRTCI_TAG=${KUBEVIRTCI_TAG:-2409241245-d93dec16}
export KUBEVIRTCI_TAG=${KUBEVIRTCI_TAG:-2412171619-fbd31717}

KUBEVIRTCI_REPO='https://github.com/kubevirt/kubevirtci.git'
# The CLUSTER_PATH var is used in cluster folder and points to the _kubevirtci where the cluster is deployed from.
@@ -28,9 +28,8 @@ function cluster::_get_tag() {
}

function cluster::install() {
# Remove cloned kubevirtci repository if it does not match the requested one
if [ -d ${CLUSTER_PATH} ]; then
if [ $(cluster::_get_repo) != ${KUBEVIRTCI_REPO} -o $(cluster::_get_tag) != ${KUBEVIRTCI_TAG} ]; then
if [[ $(cluster::_get_repo) != ${KUBEVIRTCI_REPO} || $(cluster::_get_tag) != ${KUBEVIRTCI_TAG} ]]; then
rm -rf ${CLUSTER_PATH}
fi
fi
25 changes: 20 additions & 5 deletions cmd/manager/main.go
@@ -3,9 +3,16 @@ package main
import (
"flag"
"log"
"net/http"
"os"
"runtime"

"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"

"github.com/machadovilaca/operator-observability/pkg/operatormetrics"
osv1 "github.com/openshift/api/operator/v1"
"github.com/spf13/pflag"
@@ -50,7 +57,7 @@ func main() {

printVersion()

namespace, err := k8s.GetWatchNamespace()
watchNamespace, err := k8s.GetWatchNamespace()
if err != nil {
log.Printf("failed to get watch namespace: %v", err)
os.Exit(1)
@@ -65,10 +72,18 @@

// Create a new Cmd to provide shared dependencies and start components
mgr, err := manager.New(cfg, manager.Options{
Scheme: scheme,
Namespace: namespace,
MetricsBindAddress: controllerruntimemetrics.DefaultBindAddress,
MapperProvider: k8s.NewDynamicRESTMapper,
Scheme: scheme,
Cache: cache.Options{
DefaultNamespaces: map[string]cache.Config{
watchNamespace: {},
},
},
Metrics: metricsserver.Options{
BindAddress: metricsserver.DefaultBindAddress,
},
MapperProvider: func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) {
return apiutil.NewDynamicRESTMapper(c, httpClient)
},
})
if err != nil {
log.Printf("failed to instantiate new operator manager: %v", err)
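
For anyone migrating a similar call site, a minimal sketch of the controller-runtime v0.19 manager setup shown in the hunk above. The newManager wrapper and package name are illustrative; only the Options fields mirror the diff.

```go
package example

import (
	"net/http"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)

// newManager is an illustrative wrapper around manager.New showing where the
// old Namespace and MetricsBindAddress options moved to.
func newManager(cfg *rest.Config, scheme *runtime.Scheme, watchNamespace string) (manager.Manager, error) {
	return manager.New(cfg, manager.Options{
		Scheme: scheme,
		// Options.Namespace is gone; the watched namespace is now configured
		// on the cache via DefaultNamespaces.
		Cache: cache.Options{
			DefaultNamespaces: map[string]cache.Config{
				watchNamespace: {},
			},
		},
		// MetricsBindAddress moved into a dedicated metrics server options struct.
		Metrics: metricsserver.Options{
			BindAddress: metricsserver.DefaultBindAddress,
		},
		// MapperProvider now also receives the shared *http.Client.
		MapperProvider: func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) {
			return apiutil.NewDynamicRESTMapper(c, httpClient)
		},
	})
}
```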
232 changes: 115 additions & 117 deletions go.mod

Large diffs are not rendered by default.

2,521 changes: 2,252 additions & 269 deletions go.sum

Large diffs are not rendered by default.

@@ -107,7 +107,7 @@ func Add(mgr manager.Manager) error {

// newReconciler returns a new ReconcileNetworkAddonsConfig
func newReconciler(mgr manager.Manager, namespace string, clusterInfo *network.ClusterInfo) *ReconcileNetworkAddonsConfig {
// Status manager is shared between both reconcilers and it is used to update conditions of
// Status manager is shared between both reconcilers, and it is used to update conditions of
// NetworkAddonsConfig.State. NetworkAddonsConfig reconciler updates it with progress of rendering
// and applying of manifests. Pods reconciler updates it with progress of deployed pods.
statusManager := statusmanager.New(mgr, names.OPERATOR_CONFIG)
@@ -122,39 +122,51 @@ func newReconciler(mgr manager.Manager, namespace string, clusterInfo *network.C
}
}

type ctrlPredicate[T metav1.Object] struct {
predicate.TypedFuncs[T]
}

func (p ctrlPredicate[T]) Update(e event.TypedUpdateEvent[T]) bool {
oldConfig, err := runtimeObjectToNetworkAddonsConfig(e.ObjectOld)
if err != nil {
log.Printf("Failed to convert runtime.Object to NetworkAddonsConfig (old): %v", err)
return false
}
newConfig, err := runtimeObjectToNetworkAddonsConfig(e.ObjectNew)
if err != nil {
log.Printf("Failed to convert runtime.Object to NetworkAddonsConfig (new): %v", err)
return false
}
return !reflect.DeepEqual(oldConfig.Spec, newConfig.Spec)
}

// add adds a new Controller to mgr with r as the ReconcileNetworkAddonsConfig
func add(mgr manager.Manager, r *ReconcileNetworkAddonsConfig) error {
// Create a new controller for operator's NetworkAddonsConfig resource
c, err := controller.New("networkaddonsconfig-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}

// Create custom predicate for NetworkAddonsConfig watcher. This makes sure that Status field
// updates will not trigger reconciling of the object. Reconciliation is triggered only if
// Spec fields differ.
pred := predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
oldConfig, err := runtimeObjectToNetworkAddonsConfig(e.ObjectOld)
if err != nil {
log.Printf("Failed to convert runtime.Object to NetworkAddonsConfig: %v", err)
return false
}
newConfig, err := runtimeObjectToNetworkAddonsConfig(e.ObjectNew)
if err != nil {
log.Printf("Failed to convert runtime.Object to NetworkAddonsConfig: %v", err)
return false
}
return !reflect.DeepEqual(oldConfig.Spec, newConfig.Spec)
},
if err := c.Watch(
source.Kind(
mgr.GetCache(),
&cnaov1alpha1.NetworkAddonsConfig{},
&handler.TypedEnqueueRequestForObject[*cnaov1alpha1.NetworkAddonsConfig]{},
&ctrlPredicate[*cnaov1alpha1.NetworkAddonsConfig]{},
),
); err != nil {
return fmt.Errorf("unable to watch NetworkAddonsConfig v1alpha1: %w", err)
}

// Watch for changes to primary resource NetworkAddonsConfig
if err := c.Watch(&source.Kind{Type: &cnaov1alpha1.NetworkAddonsConfig{}}, &handler.EnqueueRequestForObject{}, pred); err != nil {
return err
}
if err := c.Watch(&source.Kind{Type: &cnaov1.NetworkAddonsConfig{}}, &handler.EnqueueRequestForObject{}, pred); err != nil {
return err
if err := c.Watch(
source.Kind(
mgr.GetCache(),
&cnaov1.NetworkAddonsConfig{},
&handler.TypedEnqueueRequestForObject[*cnaov1.NetworkAddonsConfig]{},
&ctrlPredicate[*cnaov1.NetworkAddonsConfig]{},
),
); err != nil {
return fmt.Errorf("unable to watch NetworkAddonsConfig v1: %w", err)
}

// Create a new controller for Pod resources, this will be used to track state of deployed components
@@ -163,14 +175,24 @@ func add(mgr manager.Manager, r *ReconcileNetworkAddonsConfig) error {
return err
}

// Watch for changes on DaemonSet and Deployment resources
err = c.Watch(&source.Kind{Type: &appsv1.DaemonSet{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
if err := c.Watch(
source.Kind(
mgr.GetCache(),
&appsv1.DaemonSet{},
&handler.TypedEnqueueRequestForObject[*appsv1.DaemonSet]{},
),
); err != nil {
return fmt.Errorf("unable to watch NetworkAddonsConfig v1: %w", err)
}
err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err

if err := c.Watch(
source.Kind(
mgr.GetCache(),
&appsv1.Deployment{},
&handler.TypedEnqueueRequestForObject[*appsv1.Deployment]{},
),
); err != nil {
return fmt.Errorf("unable to watch NetworkAddonsConfig v1: %w", err)
}

return nil
@@ -645,14 +667,12 @@ func isResourceAvailable(kubeClient kubernetes.Interface, name string, group str
return true, nil
}

func runtimeObjectToNetworkAddonsConfig(obj runtime.Object) (*cnao.NetworkAddonsConfig, error) {
// convert the runtime.Object to unstructured.Unstructured
func runtimeObjectToNetworkAddonsConfig(obj interface{}) (*cnao.NetworkAddonsConfig, error) {
unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
if err != nil {
return nil, err
}

// convert unstructured.Unstructured to a NetworkAddonsConfig
networkAddonsConfig := &cnao.NetworkAddonsConfig{}
if err = runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredObj, networkAddonsConfig); err != nil {
return nil, err
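
A compact sketch of the typed watch API the hunks above migrate to (controller-runtime v0.19): source.Kind is now generic and bundles the cache, the object, a typed handler and any typed predicates into the single Source handed to Watch. The specChanged predicate and watchDeployments helper below are illustrative stand-ins for ctrlPredicate and the watches set up in add(), not the repository's code.

```go
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// specChanged mirrors the shape of ctrlPredicate above: embedding TypedFuncs
// makes every unimplemented callback default to true, and only Update is
// overridden. Here it filters on metadata.generation instead of comparing Specs.
type specChanged[T metav1.Object] struct {
	predicate.TypedFuncs[T]
}

func (specChanged[T]) Update(e event.TypedUpdateEvent[T]) bool {
	return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
}

// watchDeployments shows the new Watch call: one generic source.Kind instead of
// the removed &source.Kind{Type: ...} struct plus separate handler/predicate args.
func watchDeployments(mgr manager.Manager, c controller.Controller) error {
	return c.Watch(
		source.Kind(
			mgr.GetCache(),
			&appsv1.Deployment{},
			&handler.TypedEnqueueRequestForObject[*appsv1.Deployment]{},
			specChanged[*appsv1.Deployment]{},
		),
	)
}
```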
6 changes: 4 additions & 2 deletions pkg/monitoring/rules/alerts/kubemacpool.go
@@ -2,14 +2,16 @@ package alerts

import (
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"

"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
)

var kubemacpoolAlerts = []promv1.Rule{
{
Alert: "KubeMacPoolDuplicateMacsFound",
Expr: intstr.FromString("kubevirt_cnao_kubemacpool_duplicate_macs != 0"),
For: "5m",
For: ptr.To(promv1.Duration("5m")),
Annotations: map[string]string{
"summary": "Duplicate macs found.",
},
@@ -21,7 +23,7 @@
{
Alert: "KubemacpoolDown",
Expr: intstr.FromString("kubevirt_cnao_cr_kubemacpool_aggregated == 1 and kubevirt_cnao_kubemacpool_manager_up == 0"),
For: "5m",
For: ptr.To(promv1.Duration("5m")),
Annotations: map[string]string{
"summary": "KubeMacpool is deployed by CNAO CR but KubeMacpool pod is down.",
},
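
The substantive change in these alert files is the type of the For field: prometheus-operator v0.68.0 models it as *promv1.Duration rather than a plain string, hence the ptr.To wrapper. A minimal sketch with a placeholder alert name and expression:

```go
package alerts

import (
	promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/utils/ptr"
)

// exampleRule is illustrative; only the For field mirrors the change above.
var exampleRule = promv1.Rule{
	Alert: "ExampleAlertFiring",
	Expr:  intstr.FromString("example_metric != 0"),
	For:   ptr.To(promv1.Duration("5m")), // was: For: "5m"
	Annotations: map[string]string{
		"summary": "Placeholder alert illustrating the *promv1.Duration field.",
	},
}
```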
8 changes: 5 additions & 3 deletions pkg/monitoring/rules/alerts/operator.go
@@ -3,16 +3,18 @@ package alerts
import (
"fmt"

promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"

promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

func operatorAlerts(namespace string) []promv1.Rule {
return []promv1.Rule{
{
Alert: "CnaoDown",
Expr: intstr.FromString("kubevirt_cnao_operator_up == 0"),
For: "5m",
For: ptr.To(promv1.Duration("5m")),
Annotations: map[string]string{
"summary": "CNAO pod is down.",
},
@@ -24,7 +26,7 @@
{
Alert: "NetworkAddonsConfigNotReady",
Expr: intstr.FromString(fmt.Sprintf("sum(kubevirt_cnao_cr_ready{namespace='%s'} or vector(0)) == 0", namespace)),
For: "5m",
For: ptr.To(promv1.Duration("5m")),
Annotations: map[string]string{
"summary": "CNAO CR NetworkAddonsConfig is not ready.",
},
14 changes: 6 additions & 8 deletions test/e2e/monitoring/alerts_test.go
@@ -17,9 +17,7 @@ import (
k8slabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
v1 "kubevirt.io/api/core/v1"
kvtests "kubevirt.io/kubevirt/tests"
"kubevirt.io/kubevirt/tests/libvmi"
kvtutil "kubevirt.io/kubevirt/tests/util"
"kubevirt.io/kubevirt/pkg/libvmi"
k8sclient "sigs.k8s.io/controller-runtime/pkg/client"

cnao "github.com/kubevirt/cluster-network-addons-operator/pkg/apis/networkaddonsoperator/shared"
@@ -160,14 +158,14 @@ var _ = Context("Prometheus Alerts", func() {

AfterEach(func() {
By("deleting test namespace")
err = testenv.Client.Delete(context.Background(), &k8sv1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: kvtutil.NamespaceTestDefault}})
err = testenv.Client.Delete(context.Background(), &k8sv1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testsuite.NamespaceTestDefault}})
Expect(err).ToNot(HaveOccurred())
})

BeforeEach(func() {
By("creating test namespace that is not managed by kubemacpool (opted-out)")
namespace := &k8sv1.Namespace{ObjectMeta: metav1.ObjectMeta{
Name: kvtutil.NamespaceTestDefault,
Name: testsuite.NamespaceTestDefault,
Labels: map[string]string{
"mutatevirtualmachines.kubemacpool.io": "ignore",
},
@@ -182,7 +180,7 @@
Expect(err).ToNot(HaveOccurred())

By("cleaning namespace labels, returning the namespace to managed by kubemacpool")
err = cleanNamespaceLabels(kvtutil.NamespaceTestDefault)
err = cleanNamespaceLabels(testsuite.NamespaceTestDefault)
Expect(err).ToNot(HaveOccurred())

By("restaring kubemacpool pods")
@@ -205,7 +203,7 @@ func newRandomVMI() *v1.VirtualMachineInstance {
libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
libvmi.WithNetwork(v1.DefaultPodNetwork()),
)
vmi.ObjectMeta.Namespace = kvtutil.NamespaceTestDefault
vmi.ObjectMeta.Namespace = testsuite.NamespaceTestDefault
vmi.Spec.Domain.Resources.Requests = k8sv1.ResourceList{}

if checks.IsARM64(testsuite.Arch) {
@@ -220,7 +218,7 @@

func createVirtualMachineWithPrimaryInterfaceMacAddress(macAddress string) error {
vmi := newRandomVMI()
vm := kvtests.NewRandomVirtualMachine(vmi, true)
vm := libvmi.NewVirtualMachine(vmi, libvmi.WithRunStrategy(v1.RunStrategyAlways))

vm.Spec.Template.Spec.Domain.Devices.Interfaces[0].MacAddress = macAddress
err := testenv.Client.Create(context.Background(), vm)
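
A hedged sketch of the kubevirt v1.4.0 test helpers adopted above: the VMI comes from the pkg/libvmi option builder and is wrapped into a VirtualMachine with an explicit run strategy, replacing the removed kvtests.NewRandomVirtualMachine(vmi, true). The libvmi.New constructor and the package name are assumptions inferred from the partly elided hunks.

```go
package example

import (
	v1 "kubevirt.io/api/core/v1"
	"kubevirt.io/kubevirt/pkg/libvmi"
	"kubevirt.io/kubevirt/tests/testsuite"
)

// exampleVirtualMachine is illustrative; it condenses newRandomVMI and
// createVirtualMachineWithPrimaryInterfaceMacAddress from the diff above.
func exampleVirtualMachine(macAddress string) *v1.VirtualMachine {
	// libvmi.New is assumed here; the constructor call is elided in the hunk.
	vmi := libvmi.New(
		libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
		libvmi.WithNetwork(v1.DefaultPodNetwork()),
	)
	vmi.ObjectMeta.Namespace = testsuite.NamespaceTestDefault

	// WithRunStrategy(v1.RunStrategyAlways) replaces the old boolean
	// "running" argument of kvtests.NewRandomVirtualMachine.
	vm := libvmi.NewVirtualMachine(vmi, libvmi.WithRunStrategy(v1.RunStrategyAlways))
	vm.Spec.Template.Spec.Domain.Devices.Interfaces[0].MacAddress = macAddress
	return vm
}
```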
2 changes: 1 addition & 1 deletion test/e2e/monitoring/prometheus_utils.go
@@ -109,7 +109,7 @@ func initializePromClient(prometheusUrl string, token string) promApiv1.API {

c, err := promApi.NewClient(promApi.Config{
Address: prometheusUrl,
RoundTripper: promConfig.NewAuthorizationCredentialsRoundTripper("Bearer", promConfig.Secret(token), defaultRoundTripper),
RoundTripper: promConfig.NewAuthorizationCredentialsRoundTripper("Bearer", promConfig.NewInlineSecret(token), defaultRoundTripper),
})
Expect(err).ToNot(HaveOccurred())

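
A minimal sketch of the prometheus/common change behind this one-liner: the credentials argument of NewAuthorizationCredentialsRoundTripper is now a SecretReader, so the raw token is wrapped with NewInlineSecret instead of being cast to promConfig.Secret. The newPrometheusClient helper, its package name, and its error-returning shape are illustrative; the test code above asserts with Gomega instead.

```go
package example

import (
	promApi "github.com/prometheus/client_golang/api"
	promApiv1 "github.com/prometheus/client_golang/api/prometheus/v1"
	promConfig "github.com/prometheus/common/config"
)

// newPrometheusClient is an illustrative condensation of initializePromClient.
func newPrometheusClient(prometheusURL, token string) (promApiv1.API, error) {
	// The round tripper expects a SecretReader; NewInlineSecret wraps the
	// literal token (was: promConfig.Secret(token)).
	rt := promConfig.NewAuthorizationCredentialsRoundTripper(
		"Bearer",
		promConfig.NewInlineSecret(token),
		promApi.DefaultRoundTripper,
	)

	c, err := promApi.NewClient(promApi.Config{
		Address:      prometheusURL,
		RoundTripper: rt,
	})
	if err != nil {
		return nil, err
	}
	return promApiv1.NewAPI(c), nil
}
```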
3 changes: 3 additions & 0 deletions vendor/github.com/VividCortex/ewma/.gitignore

Some generated files are not rendered by default.

3 changes: 3 additions & 0 deletions vendor/github.com/VividCortex/ewma/.whitesource

Some generated files are not rendered by default.

21 changes: 21 additions & 0 deletions vendor/github.com/VividCortex/ewma/LICENSE

Some generated files are not rendered by default.
