From 5c73100e233e202e2e0be53a62686527a1eef945 Mon Sep 17 00:00:00 2001 From: Nikola Date: Sat, 24 Feb 2024 16:01:52 +0200 Subject: [PATCH] change the way the anps and banps are shown in the explain commnad --- cmd/policy-assistant/examples/example.go | 415 +++++++++++++++++- cmd/policy-assistant/pkg/matcher/builder.go | 60 ++- .../pkg/matcher/builder_tests.go | 82 +++- cmd/policy-assistant/pkg/matcher/explain.go | 156 +++++-- cmd/policy-assistant/pkg/matcher/policy.go | 28 +- .../pkg/matcher/portmatcher.go | 28 ++ cmd/policy-assistant/pkg/matcher/target.go | 71 ++- 7 files changed, 741 insertions(+), 99 deletions(-) diff --git a/cmd/policy-assistant/examples/example.go b/cmd/policy-assistant/examples/example.go index c8974ac3..34100450 100644 --- a/cmd/policy-assistant/examples/example.go +++ b/cmd/policy-assistant/examples/example.go @@ -6,13 +6,13 @@ import ( "sigs.k8s.io/network-policy-api/apis/v1alpha1" ) -var CoreGressRulesCombinedANB []*v1alpha1.AdminNetworkPolicy = []*v1alpha1.AdminNetworkPolicy{ +var CoreGressRulesCombinedANB = []*v1alpha1.AdminNetworkPolicy{ { ObjectMeta: v1.ObjectMeta{ - Name: "default", + Name: "Example ANP", }, Spec: v1alpha1.AdminNetworkPolicySpec{ - Priority: 15, + Priority: 20, Subject: v1alpha1.AdminNetworkPolicySubject{ Namespaces: &v1.LabelSelector{ MatchExpressions: []v1.LabelSelectorRequirement{ @@ -22,9 +22,6 @@ var CoreGressRulesCombinedANB []*v1alpha1.AdminNetworkPolicy = []*v1alpha1.Admin Values: []string{"network-policy-conformance-gryffindor"}, }, }, - MatchLabels: map[string]string{ - "kubernetes.io/metadata.name": "", - }, }, }, Egress: []v1alpha1.AdminNetworkPolicyEgressRule{ @@ -309,6 +306,305 @@ var CoreGressRulesCombinedANB []*v1alpha1.AdminNetworkPolicy = []*v1alpha1.Admin }, }, }, + { + ObjectMeta: v1.ObjectMeta{ + Name: "Example ANP 2", + }, + Spec: v1alpha1.AdminNetworkPolicySpec{ + Priority: 16, + Subject: v1alpha1.AdminNetworkPolicySubject{ + Namespaces: &v1.LabelSelector{ + MatchExpressions: []v1.LabelSelectorRequirement{ + { + Key: "kubernetes.io/metadata.name", + Operator: v1.LabelSelectorOpExists, + Values: []string{"network-policy-conformance-gryffindor"}, + }, + }, + }, + }, + Egress: []v1alpha1.AdminNetworkPolicyEgressRule{ + { + Name: "allow-to-ravenclaw-everything-2", + Action: v1alpha1.AdminNetworkPolicyRuleActionAllow, + To: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "network-policy-conformance-ravenclaw", + }, + }, + }, + }, + }, + }, + { + Name: "deny-to-ravenclaw-everything-2", + Action: v1alpha1.AdminNetworkPolicyRuleActionDeny, + To: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "network-policy-conformance-ravenclaw", + }, + }, + }, + }, + }, + }, + { + Name: "pass-to-ravenclaw-everything-2", + Action: v1alpha1.AdminNetworkPolicyRuleActionPass, + To: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "network-policy-conformance-ravenclaw", + }, + }, + }, + }, + }, + }, + { + Name: "deny-to-slytherin-at-ports-80-53-9003-2", + Action: v1alpha1.AdminNetworkPolicyRuleActionDeny, + To: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + 
MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "network-policy-conformance-slytherin", + }, + }, + }, + }, + }, + Ports: &[]v1alpha1.AdminNetworkPolicyPort{ + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolTCP, Port: 80}, + }, + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolUDP, Port: 53}, + }, + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolSCTP, Port: 9003}, + }, + }, + }, + { + Name: "pass-to-slytherin-at-port-80-53-9003-2", + Action: v1alpha1.AdminNetworkPolicyRuleActionPass, + To: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "network-policy-conformance-slytherin", + }, + }, + }, + }, + }, + Ports: &[]v1alpha1.AdminNetworkPolicyPort{ + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolTCP, Port: 80}, + }, + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolUDP, Port: 53}, + }, + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolSCTP, Port: 9003}, + }, + }, + }, + { + Name: "allow-to-hufflepuff-at-ports-8080-5353-2", + Action: v1alpha1.AdminNetworkPolicyRuleActionAllow, + To: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "network-policy-conformance-hufflepuff", + }, + }, + }, + }, + }, + Ports: &[]v1alpha1.AdminNetworkPolicyPort{ + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolTCP, Port: 8080}, + }, + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolUDP, Port: 5353}, + }, + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolSCTP, Port: 9003}, + }, + }, + }, + { + Name: "deny-to-hufflepuff-everything-else-2", + Action: v1alpha1.AdminNetworkPolicyRuleActionDeny, + To: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "network-policy-conformance-hufflepuff", + }, + }, + }, + }, + }, + }, + }, + Ingress: []v1alpha1.AdminNetworkPolicyIngressRule{ + { + Name: "allow-from-ravenclaw-everything-2", + Action: v1alpha1.AdminNetworkPolicyRuleActionAllow, + From: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "network-policy-conformance-ravenclaw", + }, + }, + }, + }, + }, + }, + { + Name: "deny-from-ravenclaw-everything-2", + Action: v1alpha1.AdminNetworkPolicyRuleActionDeny, + From: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "network-policy-conformance-ravenclaw", + }, + }, + }, + }, + }, + }, + { + Name: "pass-from-ravenclaw-everything-2", + Action: v1alpha1.AdminNetworkPolicyRuleActionPass, + From: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "network-policy-conformance-ravenclaw", + }, + }, + }, + }, + }, + }, + { + Name: "deny-from-slytherin-at-port-80-53-9003-2", + Action: v1alpha1.AdminNetworkPolicyRuleActionDeny, + From: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": 
"network-policy-conformance-slytherin", + }, + }, + }, + }, + }, + Ports: &[]v1alpha1.AdminNetworkPolicyPort{ + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolTCP, Port: 80}, + }, + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolUDP, Port: 53}, + }, + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolSCTP, Port: 9003}, + }, + }, + }, + { + Name: "pass-from-slytherin-at-port-80-53-9003-2", + Action: v1alpha1.AdminNetworkPolicyRuleActionPass, + From: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "network-policy-conformance-slytherin", + }, + }, + }, + }, + }, + Ports: &[]v1alpha1.AdminNetworkPolicyPort{ + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolTCP, Port: 80}, + }, + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolUDP, Port: 53}, + }, + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolSCTP, Port: 9003}, + }, + }, + }, + { + Name: "allow-from-hufflepuff-at-port-80-5353-9003-2", + Action: v1alpha1.AdminNetworkPolicyRuleActionAllow, + From: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "network-policy-conformance-hufflepuff", + }, + }, + }, + }, + }, + Ports: &[]v1alpha1.AdminNetworkPolicyPort{ + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolTCP, Port: 80}, + }, + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolUDP, Port: 5353}, + }, + { + PortNumber: &v1alpha1.Port{Protocol: v12.ProtocolSCTP, Port: 9003}, + }, + }, + }, + { + Name: "deny-from-hufflepuff-everything-else-2", + Action: v1alpha1.AdminNetworkPolicyRuleActionDeny, + From: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "network-policy-conformance-hufflepuff", + }, + }, + }, + }, + }, + }, + }, + }, + }, } var CoreGressRulesCombinedBANB *v1alpha1.BaselineAdminNetworkPolicy = &v1alpha1.BaselineAdminNetworkPolicy{ @@ -318,8 +614,12 @@ var CoreGressRulesCombinedBANB *v1alpha1.BaselineAdminNetworkPolicy = &v1alpha1. Spec: v1alpha1.BaselineAdminNetworkPolicySpec{ Subject: v1alpha1.AdminNetworkPolicySubject{ Namespaces: &v1.LabelSelector{ - MatchLabels: map[string]string{ - "kubernetes.io/metadata.name": "network-policy-conformance-gryffindor", + MatchExpressions: []v1.LabelSelectorRequirement{ + { + Key: "kubernetes.io/metadata.name", + Operator: v1.LabelSelectorOpExists, + Values: []string{"network-policy-conformance-gryffindor"}, + }, }, }, }, @@ -504,3 +804,102 @@ var CoreGressRulesCombinedBANB *v1alpha1.BaselineAdminNetworkPolicy = &v1alpha1. 
}, }, } + +var SimpleANPs = []*v1alpha1.AdminNetworkPolicy{ + { + ObjectMeta: v1.ObjectMeta{ + Name: "Simple ANP 1", + }, + Spec: v1alpha1.AdminNetworkPolicySpec{ + Priority: 34, + Subject: v1alpha1.AdminNetworkPolicySubject{ + Namespaces: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + }, + }, + }, + Egress: []v1alpha1.AdminNetworkPolicyEgressRule{ + { + Name: "allow-to-ravenclaw-everything", + Action: v1alpha1.AdminNetworkPolicyRuleActionAllow, + To: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "network-policy-conformance-ravenclaw", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + ObjectMeta: v1.ObjectMeta{ + Name: "Simple ANP 2", + }, + Spec: v1alpha1.AdminNetworkPolicySpec{ + Priority: 50, + Subject: v1alpha1.AdminNetworkPolicySubject{ + Namespaces: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + }, + }, + }, + Egress: []v1alpha1.AdminNetworkPolicyEgressRule{ + { + Name: "allow-to-ravenclaw-everything", + Action: v1alpha1.AdminNetworkPolicyRuleActionDeny, + To: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "network-policy-conformance-ravenclaw", + }, + }, + }, + }, + }, + }, + }, + }, + }, +} + +var SimpleBANP *v1alpha1.BaselineAdminNetworkPolicy = &v1alpha1.BaselineAdminNetworkPolicy{ + ObjectMeta: v1.ObjectMeta{ + Name: "Simple BANP", + }, + Spec: v1alpha1.BaselineAdminNetworkPolicySpec{ + Subject: v1alpha1.AdminNetworkPolicySubject{ + Namespaces: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + }, + }, + }, + Egress: []v1alpha1.BaselineAdminNetworkPolicyEgressRule{ + { + Name: "allow-to-ravenclaw-everything", + Action: v1alpha1.BaselineAdminNetworkPolicyRuleActionAllow, + To: []v1alpha1.AdminNetworkPolicyPeer{ + { + Namespaces: &v1alpha1.NamespacedPeer{ + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": "network-policy-conformance-ravenclaw", + }, + }, + }, + }, + }, + }, + }, + }, +} diff --git a/cmd/policy-assistant/pkg/matcher/builder.go b/cmd/policy-assistant/pkg/matcher/builder.go index 5eaeffd9..f44a1f59 100644 --- a/cmd/policy-assistant/pkg/matcher/builder.go +++ b/cmd/policy-assistant/pkg/matcher/builder.go @@ -21,21 +21,30 @@ func BuildV1AndV2NetPols(simplify bool, netpols []*networkingv1.NetworkPolicy, a np.AddTarget(false, egress) } + var banpIngress *Target + var banpEgress *Target + + if banp != nil { + // there can only be one BANP by definition + banpIngress, banpEgress = BuildTargetBANP(banp) + np.AddTarget(true, banpIngress) + np.AddTarget(false, banpEgress) + } + priorities := make(map[int32]struct{}) for _, p := range anps { if _, ok := priorities[p.Spec.Priority]; ok { - panic(errors.Errorf("duplicate priorities are undefined. priority: %d", p.Spec.Priority)) + panic(errors.Errorf("duplicate priorities are now allowed. 
priority: %d", p.Spec.Priority)) } priorities[p.Spec.Priority] = struct{}{} ingress, egress := BuildTargetANP(p) - np.AddTarget(true, ingress) - np.AddTarget(false, egress) - } + if banpIngress != nil && ingress.GetPrimaryKey() == banpIngress.GetPrimaryKey() { + ingress.CombineCommonPeers(banpIngress) + egress.CombineCommonPeers(banpEgress) + + } - if banp != nil { - // there can only be one BANP by definition - ingress, egress := BuildTargetBANP(banp) np.AddTarget(true, ingress) np.AddTarget(false, egress) } @@ -64,16 +73,27 @@ func BuildTarget(netpol *networkingv1.NetworkPolicy) (*Target, *Target) { for _, pType := range netpol.Spec.PolicyTypes { switch pType { case networkingv1.PolicyTypeIngress: + p := map[string][]PeerMatcher{} + ingressPeers := BuildIngressMatcher(policyNamespace, netpol.Spec.Ingress) + if len(ingressPeers) > 0 { + p[""] = ingressPeers + } + ingress = &Target{ SubjectMatcher: NewSubjectV1(policyNamespace, netpol.Spec.PodSelector), SourceRules: []NetPolID{netPolID(netpol)}, - Peers: BuildIngressMatcher(policyNamespace, netpol.Spec.Ingress), + Peers: p, } case networkingv1.PolicyTypeEgress: + p := map[string][]PeerMatcher{} + egressPeers := BuildEgressMatcher(policyNamespace, netpol.Spec.Egress) + if len(egressPeers) > 0 { + p[""] = egressPeers + } egress = &Target{ SubjectMatcher: NewSubjectV1(policyNamespace, netpol.Spec.PodSelector), SourceRules: []NetPolID{netPolID(netpol)}, - Peers: BuildEgressMatcher(policyNamespace, netpol.Spec.Egress), + Peers: p, } } } @@ -218,34 +238,36 @@ func BuildTargetANP(anp *v1alpha1.AdminNetworkPolicy) (*Target, *Target) { ingress = &Target{ SubjectMatcher: NewSubjectAdmin(&anp.Spec.Subject), SourceRules: []NetPolID{netPolID(anp)}, + Peers: make(map[string][]PeerMatcher), } for _, r := range anp.Spec.Ingress { v := AdminActionToVerdict(r.Action) matchers := BuildPeerMatcherAdmin(r.From, r.Ports) for _, m := range matchers { - matcherAdmin := NewPeerMatcherANP(m, v, int(anp.Spec.Priority), r.Name) - ingress.Peers = append(ingress.Peers, matcherAdmin) + matcherAdmin := NewPeerMatcherANP(m, v, int(anp.Spec.Priority), anp.Name) + k := m.Pod.PrimaryKey() + m.Namespace.PrimaryKey() + m.Port.GetPrimaryKey() + ingress.Peers[k] = append(ingress.Peers[k], matcherAdmin) } } } - if len(anp.Spec.Egress) > 0 { egress = &Target{ SubjectMatcher: NewSubjectAdmin(&anp.Spec.Subject), SourceRules: []NetPolID{netPolID(anp)}, + Peers: make(map[string][]PeerMatcher), } for _, r := range anp.Spec.Egress { v := AdminActionToVerdict(r.Action) matchers := BuildPeerMatcherAdmin(r.To, r.Ports) for _, m := range matchers { - matcherAdmin := NewPeerMatcherANP(m, v, int(anp.Spec.Priority), r.Name) - egress.Peers = append(egress.Peers, matcherAdmin) + matcherAdmin := NewPeerMatcherANP(m, v, int(anp.Spec.Priority), anp.Name) + k := m.Pod.PrimaryKey() + m.Namespace.PrimaryKey() + m.Port.GetPrimaryKey() + egress.Peers[k] = append(egress.Peers[k], matcherAdmin) } } } - return ingress, egress } @@ -261,6 +283,7 @@ func BuildTargetBANP(banp *v1alpha1.BaselineAdminNetworkPolicy) (*Target, *Targe ingress = &Target{ SubjectMatcher: NewSubjectAdmin(&banp.Spec.Subject), SourceRules: []NetPolID{netPolID(banp)}, + Peers: make(map[string][]PeerMatcher), } for _, r := range banp.Spec.Ingress { @@ -268,7 +291,8 @@ func BuildTargetBANP(banp *v1alpha1.BaselineAdminNetworkPolicy) (*Target, *Targe matchers := BuildPeerMatcherAdmin(r.From, r.Ports) for _, m := range matchers { matcherAdmin := NewPeerMatcherBANP(m, v, r.Name) - ingress.Peers = append(ingress.Peers, matcherAdmin) + k := 
m.Pod.PrimaryKey() + m.Namespace.PrimaryKey() + m.Port.GetPrimaryKey() + ingress.Peers[k] = append(ingress.Peers[k], matcherAdmin) } } } @@ -277,6 +301,7 @@ func BuildTargetBANP(banp *v1alpha1.BaselineAdminNetworkPolicy) (*Target, *Targe egress = &Target{ SubjectMatcher: NewSubjectAdmin(&banp.Spec.Subject), SourceRules: []NetPolID{netPolID(banp)}, + Peers: make(map[string][]PeerMatcher), } for _, r := range banp.Spec.Egress { @@ -284,7 +309,8 @@ func BuildTargetBANP(banp *v1alpha1.BaselineAdminNetworkPolicy) (*Target, *Targe matchers := BuildPeerMatcherAdmin(r.To, r.Ports) for _, m := range matchers { matcherAdmin := NewPeerMatcherBANP(m, v, r.Name) - egress.Peers = append(egress.Peers, matcherAdmin) + k := m.Pod.PrimaryKey() + m.Namespace.PrimaryKey() + m.Port.GetPrimaryKey() + egress.Peers[k] = append(egress.Peers[k], matcherAdmin) } } } diff --git a/cmd/policy-assistant/pkg/matcher/builder_tests.go b/cmd/policy-assistant/pkg/matcher/builder_tests.go index 1f964b98..a4cdb622 100644 --- a/cmd/policy-assistant/pkg/matcher/builder_tests.go +++ b/cmd/policy-assistant/pkg/matcher/builder_tests.go @@ -1,9 +1,12 @@ package matcher import ( + "github.com/mattfenwick/collections/pkg/slice" + "github.com/mattfenwick/cyclonus/examples" "github.com/mattfenwick/cyclonus/pkg/kube/netpol" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "golang.org/x/exp/maps" v1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,7 +29,7 @@ func RunBuilderTests() { ingress, egress := BuildTarget(netpol.AllowNoIngress) Expect(ingress).ToNot(BeNil()) - Expect(ingress.Peers).To(BeNil()) + Expect(ingress.Peers).To(BeEmpty()) Expect(egress).To(BeNil()) }) @@ -35,7 +38,7 @@ func RunBuilderTests() { ingress, egress := BuildTarget(netpol.AllowNoEgress) Expect(egress).ToNot(BeNil()) - Expect(egress.Peers).To(BeNil()) + Expect(egress.Peers).To(BeEmpty()) Expect(ingress).To(BeNil()) }) @@ -44,10 +47,10 @@ func RunBuilderTests() { ingress, egress := BuildTarget(netpol.AllowNoIngressAllowNoEgress) Expect(egress).ToNot(BeNil()) - Expect(egress.Peers).To(BeNil()) + Expect(egress.Peers).To(BeEmpty()) Expect(ingress).ToNot(BeNil()) - Expect(ingress.Peers).To(BeNil()) + Expect(ingress.Peers).To(BeEmpty()) }) }) @@ -73,7 +76,7 @@ func RunBuilderTests() { ingress, egress := BuildTarget(netpol.AllowNoIngress_EmptyIngress) Expect(ingress).ToNot(BeNil()) - Expect(ingress.Peers).To(BeNil()) + Expect(ingress.Peers).To(BeEmpty()) Expect(egress).To(BeNil()) }) @@ -82,7 +85,7 @@ func RunBuilderTests() { ingress, egress := BuildTarget(netpol.AllowNoEgress_EmptyEgress) Expect(egress).ToNot(BeNil()) - Expect(egress.Peers).To(BeNil()) + Expect(egress.Peers).To(BeEmpty()) Expect(ingress).To(BeNil()) }) @@ -91,10 +94,10 @@ func RunBuilderTests() { ingress, egress := BuildTarget(netpol.AllowNoIngressAllowNoEgress_EmptyEgressEmptyIngress) Expect(egress).ToNot(BeNil()) - Expect(egress.Peers).To(BeNil()) + Expect(egress.Peers).To(BeEmpty()) Expect(ingress).ToNot(BeNil()) - Expect(ingress.Peers).To(BeNil()) + Expect(ingress.Peers).To(BeEmpty()) }) }) @@ -103,21 +106,25 @@ func RunBuilderTests() { ingress, egress := BuildTarget(netpol.AllowAllIngress) Expect(egress).To(BeNil()) - Expect(ingress.Peers).To(Equal([]PeerMatcher{AllPeersPorts})) + Expect(ingress.Peers).To(Equal(map[string][]PeerMatcher{ + "": {AllPeersPorts}, + })) }) It("allow-all-egress", func() { ingress, egress := BuildTarget(netpol.AllowAllEgress) - Expect(egress.Peers).To(Equal([]PeerMatcher{AllPeersPorts})) + 
Expect(egress.Peers).To(Equal(map[string][]PeerMatcher{ + "": {AllPeersPorts}, + })) Expect(ingress).To(BeNil()) }) It("allow-all-both", func() { ingress, egress := BuildTarget(netpol.AllowAllIngressAllowAllEgress) - Expect(egress.Peers).To(Equal([]PeerMatcher{AllPeersPorts})) - Expect(ingress.Peers).To(Equal([]PeerMatcher{AllPeersPorts})) + Expect(egress.Peers).To(Equal(map[string][]PeerMatcher{"": {AllPeersPorts}})) + Expect(ingress.Peers).To(Equal(map[string][]PeerMatcher{"": {AllPeersPorts}})) }) }) @@ -339,4 +346,55 @@ func RunBuilderTests() { }}})) }) }) + + Describe("BuildV1AndV2NetPols", func() { + It("it combines ANPs with same subject", func() { + result := BuildV1AndV2NetPols(false, nil, examples.SimpleANPs, nil) + Expect(result.Egress).To(HaveLen(1)) + k := maps.Keys(result.Egress) + firstRule := result.Egress[k[0]] + Expect(firstRule.SourceRules).To(HaveLen(2)) + + }) + + It("it combines ANPs with same peers and protocol targets", func() { + result := BuildV1AndV2NetPols(false, nil, examples.SimpleANPs, nil) + k := maps.Keys(result.Egress) + firstRule := result.Egress[k[0]] + pk := maps.Keys(firstRule.Peers) + peers := firstRule.Peers[pk[0]] + Expect(peers).To(HaveLen(2)) + + }) + + It("it adds BANPs rules to ANPs if peers and protoc match", func() { + result := BuildV1AndV2NetPols(false, nil, examples.SimpleANPs, examples.SimpleBANP) + k := maps.Keys(result.Egress) + firstRule := result.Egress[k[0]] + pk := maps.Keys(firstRule.Peers) + peers := firstRule.Peers[pk[0]] + anps := slice.Filter(func(a PeerMatcher) bool { + switch t := a.(type) { + case *PeerMatcherAdmin: + return t.effectFromMatch.PolicyKind == AdminNetworkPolicy + default: + return false + } + }, peers) + + Expect(anps).To(HaveLen(2)) + + banps := slice.Filter(func(a PeerMatcher) bool { + switch t := a.(type) { + case *PeerMatcherAdmin: + return t.effectFromMatch.PolicyKind == BaselineAdminNetworkPolicy + default: + return false + } + }, peers) + + Expect(banps).To(HaveLen(1)) + + }) + }) } diff --git a/cmd/policy-assistant/pkg/matcher/explain.go b/cmd/policy-assistant/pkg/matcher/explain.go index ba6880fc..6526ce72 100644 --- a/cmd/policy-assistant/pkg/matcher/explain.go +++ b/cmd/policy-assistant/pkg/matcher/explain.go @@ -2,10 +2,11 @@ package matcher import ( "fmt" - "github.com/mattfenwick/collections/pkg/json" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" + "k8s.io/apimachinery/pkg/util/sets" "strings" - "github.com/mattfenwick/collections/pkg/slice" "github.com/mattfenwick/cyclonus/pkg/kube" "github.com/olekukonko/tablewriter" "github.com/pkg/errors" @@ -55,33 +56,101 @@ func (s *SliceBuilder) TargetsTableLines(targets []*Target, isIngress bool) { for _, rule := range sourceRules { sourceRulesStrings = append(sourceRulesStrings, string(rule)) } + slices.Sort(sourceRulesStrings) rules := strings.Join(sourceRulesStrings, "\n") s.Prefix = []string{ruleType, target.TargetString(), rules} - if len(target.Peers) == 0 { - s.Append("no pods, no ips", "no actions", "no ports, no protocols") - } else { - for _, peer := range slice.SortOn(func(p PeerMatcher) string { return json.MustMarshalToString(p) }, target.Peers) { - switch a := peer.(type) { - case *PeerMatcherAdmin: - s.PodPeerMatcherTableLines(a.PodPeerMatcher, a.effectFromMatch, a.Name) + for _, peers := range target.Peers { + if len(peers) == 0 { + s.Append("no pods, no ips", "NPv1: All peers allowed", "no ports, no protocols") + continue + } + var subject string + var ports string + var action string + + anps := []*PeerMatcherAdmin{} + bnps := 
[]*PeerMatcherAdmin{} + + for _, p := range peers { + switch t := p.(type) { case *AllPeersMatcher: s.Append("all pods, all ips", "NPv1: All peers allowed", "all ports, all protocols") case *PortsForAllPeersMatcher: - pps := PortMatcherTableLines(a.Port, NetworkPolicyV1) + pps := PortMatcherTableLines(t.Port, NetworkPolicyV1) s.Append("all pods, all ips", "", strings.Join(pps, "\n")) case *IPPeerMatcher: - s.IPPeerMatcherTableLines(a) + s.IPPeerMatcherTableLines(t) case *PodPeerMatcher: - s.PodPeerMatcherTableLines(a, NewV1Effect(true), "") - default: - continue + s.PodPeerMatcherTableLines(t, NewV1Effect(true), "") + case *PeerMatcherAdmin: + subject = resolveSubject(t.PodPeerMatcher) + ports = strings.Join(PortMatcherTableLines(t.PodPeerMatcher.Port, t.effectFromMatch.PolicyKind), "\n") + + switch t.effectFromMatch.PolicyKind { + case AdminNetworkPolicy: + anps = append(anps, t) + case BaselineAdminNetworkPolicy: + bnps = append(bnps, t) + default: + panic("This should not be possible") + } } } + + if len(anps) > 1 { + g := map[string]*AnpGroup{} + for _, v := range anps { + e := string(v.effectFromMatch.Verdict) + if _, ok := g[v.Name]; !ok { + g[v.Name] = &AnpGroup{ + name: v.Name, + priority: v.effectFromMatch.Priority, + effects: sets.New(e), + } + } + g[v.Name].effects.Insert(e) + if v.effectFromMatch.Verdict == Pass { + g[v.Name].hasPass = true + } + } + + groups := maps.Values(g) + + slices.SortFunc(groups, func(a, b *AnpGroup) bool { + return a.priority > b.priority + }) + + r := []string{"ANP:"} + var passPrinted bool + for k, a := range groups { + //Mark this ANP as active for this group if the group has pass in the effects or is last element and no previous anp has pass in the effects + if (a.hasPass && !passPrinted) || (passPrinted == false && k == len(groups)-1) { + r = append(r, fmt.Sprintf(" pri=%d (%s): %s - Active", a.priority, a.name, strings.Join(a.effects.UnsortedList(), ", "))) + passPrinted = true + } else { + r = append(r, fmt.Sprintf(" pri=%d (%s): %s", a.priority, a.name, strings.Join(a.effects.UnsortedList(), ", "))) + } + } + action = strings.Join(r, "\n") + "\n" + } + + if len(bnps) >= 1 { + action += fmt.Sprintf("BNP: %s", bnps[0].effectFromMatch.Verdict) + } + s.Append(subject, action, ports) } + } } +type AnpGroup struct { + name string + priority int + effects sets.Set[string] + hasPass bool +} + func (s *SliceBuilder) IPPeerMatcherTableLines(ip *IPPeerMatcher) { peer := ip.IPBlock.CIDR + "\n" + fmt.Sprintf("except %+v", ip.IPBlock.Except) pps := PortMatcherTableLines(ip.Port, NetworkPolicyV1) @@ -89,31 +158,7 @@ func (s *SliceBuilder) IPPeerMatcherTableLines(ip *IPPeerMatcher) { } func (s *SliceBuilder) PodPeerMatcherTableLines(nsPodMatcher *PodPeerMatcher, e Effect, name string) { - var namespaces string - switch ns := nsPodMatcher.Namespace.(type) { - case *AllNamespaceMatcher: - namespaces = "all" - case *LabelSelectorNamespaceMatcher: - namespaces = kube.LabelSelectorTableLines(ns.Selector) - case *SameLabelsNamespaceMatcher: - namespaces = fmt.Sprintf("Same labels - %s", strings.Join(ns.labels, ", ")) - case *NotSameLabelsNamespaceMatcher: - namespaces = fmt.Sprintf("Not Same labels - %s", strings.Join(ns.labels, ", ")) - case *ExactNamespaceMatcher: - namespaces = ns.Namespace - default: - panic(errors.Errorf("invalid NamespaceMatcher type %T", ns)) - } - var pods string - switch p := nsPodMatcher.Pod.(type) { - case *AllPodMatcher: - pods = "all" - case *LabelSelectorPodMatcher: - pods = kube.LabelSelectorTableLines(p.Selector) - default: - 
panic(errors.Errorf("invalid PodMatcher type %T", p)) - } - s.Append(fmt.Sprintf("Namespace:\n %s\nPod:\n %s", strings.TrimSpace(namespaces), strings.TrimSpace(pods)), priorityTableLine(e, name), strings.Join(PortMatcherTableLines(nsPodMatcher.Port, e.PolicyKind), "\n")) + s.Append(resolveSubject(nsPodMatcher), priorityTableLine(e, name), strings.Join(PortMatcherTableLines(nsPodMatcher.Port, e.PolicyKind), "\n")) } func PortMatcherTableLines(pm PortMatcher, kind PolicyKind) []string { @@ -148,11 +193,40 @@ func priorityTableLine(e Effect, name string) string { if e.PolicyKind == NetworkPolicyV1 { return "NPv1: All peers allowed" } else if e.PolicyKind == AdminNetworkPolicy { - return fmt.Sprintf("%s (%s): %s (pri=%d)", e.PolicyKind, name, e.Verdict, e.Priority) + return fmt.Sprintf(" (pri=%d) %s: %s ", e.Priority, name, e.Verdict) } else if e.PolicyKind == BaselineAdminNetworkPolicy { - return fmt.Sprintf("%s (%s): %s", e.PolicyKind, name, e.Verdict) + return fmt.Sprintf(" (%s): %s", name, e.Verdict) } else { panic(errors.Errorf("Invalid effect %s", e.PolicyKind)) } +} + +func resolveSubject(nsPodMatcher *PodPeerMatcher) string { + var namespaces string + var pods string + switch ns := nsPodMatcher.Namespace.(type) { + case *AllNamespaceMatcher: + namespaces = "all" + case *LabelSelectorNamespaceMatcher: + namespaces = kube.LabelSelectorTableLines(ns.Selector) + case *SameLabelsNamespaceMatcher: + namespaces = fmt.Sprintf("Same labels - %s", strings.Join(ns.labels, ", ")) + case *NotSameLabelsNamespaceMatcher: + namespaces = fmt.Sprintf("Not Same labels - %s", strings.Join(ns.labels, ", ")) + case *ExactNamespaceMatcher: + namespaces = ns.Namespace + default: + panic(errors.Errorf("invalid NamespaceMatcher type %T", ns)) + } + + switch p := nsPodMatcher.Pod.(type) { + case *AllPodMatcher: + pods = "all" + case *LabelSelectorPodMatcher: + pods = kube.LabelSelectorTableLines(p.Selector) + default: + panic(errors.Errorf("invalid PodMatcher type %T", p)) + } + return fmt.Sprintf("Namespace:\n %s\nPod:\n %s", strings.TrimSpace(namespaces), strings.TrimSpace(pods)) } diff --git a/cmd/policy-assistant/pkg/matcher/policy.go b/cmd/policy-assistant/pkg/matcher/policy.go index 1aa3fc8d..1a1737d7 100644 --- a/cmd/policy-assistant/pkg/matcher/policy.go +++ b/cmd/policy-assistant/pkg/matcher/policy.go @@ -300,19 +300,23 @@ func (p *Policy) IsIngressOrEgressAllowed(traffic *Traffic, isIngress bool) Dire // 3. 
Check if any matching targets allow this traffic effects := make([]Effect, 0) for _, target := range matchingTargets { - for _, m := range target.Peers { - // check if m is a PeerMatcherAdmin - e := NewV1Effect(true) - matcherAdmin, ok := m.(*PeerMatcherAdmin) - if ok { - e = matcherAdmin.effectFromMatch + for _, g := range target.Peers { + for _, m := range g { + _ = m + _ = peer + // check if m is a PeerMatcherAdmin + e := NewV1Effect(true) + matcherAdmin, ok := m.(*PeerMatcherAdmin) + if ok { + e = matcherAdmin.effectFromMatch + } + + if !m.Matches(subject, peer, traffic.ResolvedPort, traffic.ResolvedPortName, traffic.Protocol) { + e.Verdict = None + } + + effects = append(effects, e) } - - if !m.Matches(subject, peer, traffic.ResolvedPort, traffic.ResolvedPortName, traffic.Protocol) { - e.Verdict = None - } - - effects = append(effects, e) } } diff --git a/cmd/policy-assistant/pkg/matcher/portmatcher.go b/cmd/policy-assistant/pkg/matcher/portmatcher.go index e69a3d8e..df2eda99 100644 --- a/cmd/policy-assistant/pkg/matcher/portmatcher.go +++ b/cmd/policy-assistant/pkg/matcher/portmatcher.go @@ -2,6 +2,7 @@ package matcher import ( "encoding/json" + "fmt" "sort" collectionsjson "github.com/mattfenwick/collections/pkg/json" @@ -12,6 +13,7 @@ import ( type PortMatcher interface { Matches(portInt int, portName string, protocol v1.Protocol) bool + GetPrimaryKey() string } type AllPortMatcher struct{} @@ -26,6 +28,10 @@ func (ap *AllPortMatcher) MarshalJSON() (b []byte, e error) { }) } +func (ap *AllPortMatcher) GetPrimaryKey() string { + return "all ports" +} + // PortProtocolMatcher models a matcher based on: // 1. Protocol // 2. Either a) port number or b) port name. @@ -62,6 +68,10 @@ func (p *PortProtocolMatcher) Equals(other *PortProtocolMatcher) bool { return isIntStringEqual(*p.Port, *other.Port) } +func (p *PortProtocolMatcher) GetPrimaryKey() string { + return fmt.Sprintf("Type: %s, Port: %s, Protocol: %s", "Port Protocol", p.Port.String(), p.Protocol) +} + // PortRangeMatcher works with endports to specify a range of matched numeric ports. 
type PortRangeMatcher struct { From int @@ -82,6 +92,10 @@ func (prm *PortRangeMatcher) MarshalJSON() (b []byte, e error) { }) } +func (prm *PortRangeMatcher) GetPrimaryKey() string { + return fmt.Sprintf("Type: %s, From: %d, To: %d, Protocol: %s", "port range", prm.From, prm.To, prm.Protocol) +} + // SpecificPortMatcher models the case where traffic must match a named or numbered port type SpecificPortMatcher struct { Ports []*PortProtocolMatcher @@ -110,6 +124,20 @@ func (s *SpecificPortMatcher) MarshalJSON() (b []byte, e error) { }) } +func (s *SpecificPortMatcher) GetPrimaryKey() string { + var p string + for _, v := range s.Ports { + p += v.GetPrimaryKey() + } + + var pr string + for _, v := range s.PortRanges { + pr += v.GetPrimaryKey() + } + + return fmt.Sprintf("Type: %s, Ports: %s, PortRanges: %s", "specific port", p, pr) +} + func (s *SpecificPortMatcher) Combine(other *SpecificPortMatcher) *SpecificPortMatcher { logrus.Debugf("SpecificPortMatcher Combined:\n%s\n", collectionsjson.MustMarshalToString([]interface{}{s, other})) diff --git a/cmd/policy-assistant/pkg/matcher/target.go b/cmd/policy-assistant/pkg/matcher/target.go index 43fd65a8..e124cf3b 100644 --- a/cmd/policy-assistant/pkg/matcher/target.go +++ b/cmd/policy-assistant/pkg/matcher/target.go @@ -2,11 +2,12 @@ package matcher import ( "fmt" - + "github.com/mattfenwick/collections/pkg/slice" "github.com/mattfenwick/cyclonus/pkg/kube" "github.com/pkg/errors" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/network-policy-api/apis/v1alpha1" ) @@ -48,7 +49,7 @@ type Target struct { // Peers contains all matchers for a Target. // Order matters for rules in the same ANP or BANP. // Priority matters for rules in different ANPs. - Peers []PeerMatcher + Peers map[string][]PeerMatcher } func (t *Target) String() string { @@ -56,7 +57,7 @@ func (t *Target) String() string { } func (t *Target) Simplify() { - t.Peers = Simplify(t.Peers) + //t.Peers = Simplify(t.Peers) } // Combine creates a new Target combining the egress and ingress rules @@ -69,13 +70,65 @@ func (t *Target) Combine(other *Target) *Target { panic(errors.Errorf("cannot combine targets: primary keys differ -- '%s' vs '%s'", myPk, otherPk)) } + peers := map[string][]PeerMatcher{} + + peers = combinePeers(peers, t.Peers) + peers = combinePeers(peers, other.Peers) + + // ensure that we have only one banp after combining + for k := range peers { + var banp bool + peers[k] = slice.Filter(func(a PeerMatcher) bool { + switch t := a.(type) { + case *PeerMatcherAdmin: + if t.effectFromMatch.PolicyKind == BaselineAdminNetworkPolicy { + if banp { + return false + } + banp = true + return true + + } + return true + default: + return true + } + }, peers[k]) + + } + return &Target{ SubjectMatcher: t.SubjectMatcher, - Peers: append(t.Peers, other.Peers...), - SourceRules: append(t.SourceRules, other.SourceRules...), + Peers: peers, + SourceRules: sets.New(t.SourceRules...).Insert(other.SourceRules...).UnsortedList(), } } +func (t *Target) CombineCommonPeers(other *Target) { + if other == nil || len(other.Peers) == 0 { + return + } + + rules := sets.New(t.SourceRules...) + for k := range t.Peers { + if _, ok := other.Peers[k]; ok { + t.Peers[k] = append(t.Peers[k], other.Peers[k]...) + rules.Insert(other.SourceRules...) 
+ } + } + t.SourceRules = rules.UnsortedList() +} + +func combinePeers(dest map[string][]PeerMatcher, source map[string][]PeerMatcher) map[string][]PeerMatcher { + for i, v := range source { + if _, ok := dest[i]; !ok { + dest[i] = []PeerMatcher{} + } + dest[i] = append(dest[i], v...) + } + return dest +} + // CombineTargetsIgnoringPrimaryKey creates a new v1 target from the given namespace and pod selector, // and combines all the edges and source rules from the original targets into the new target. func CombineTargetsIgnoringPrimaryKey(namespace string, podSelector metav1.LabelSelector, targets []*Target) *Target { @@ -87,10 +140,10 @@ func CombineTargetsIgnoringPrimaryKey(namespace string, podSelector metav1.Label Peers: targets[0].Peers, SourceRules: targets[0].SourceRules, } - for _, t := range targets[1:] { - target.Peers = append(target.Peers, t.Peers...) - target.SourceRules = append(target.SourceRules, t.SourceRules...) - } + //for _, t := range targets[1:] { + // target.Peers = append(target.Peers, t.Peers...) + // target.SourceRules = append(target.SourceRules, t.SourceRules...) + //} return target }
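
The sketch below is not part of the patch; it is a minimal, self-contained model of the data-structure change above, written against simplified stand-in types. In the patch itself, Target.Peers becomes a map keyed by the concatenation of the pod, namespace, and port matchers' primary keys, and Target.CombineCommonPeers appends a BANP's rules only into buckets that an ANP target with the same subject already populated. The names rule, simpleTarget, and combineCommonPeers are invented for illustration and do not exist in the repository.

// Illustrative sketch only -- simplified stand-ins for Target/PeerMatcher.
package main

import "fmt"

type rule struct {
	policy  string // e.g. "Simple ANP 1", "Simple BANP"
	kind    string // "ANP" or "BANP"
	verdict string // "Allow", "Deny", "Pass"
}

type simpleTarget struct {
	subject string
	peers   map[string][]rule // keyed like pod PK + namespace PK + port PK
}

// combineCommonPeers mirrors the idea of Target.CombineCommonPeers in the patch:
// BANP rules are appended only to buckets the ANP target already has.
func combineCommonPeers(anp, banp *simpleTarget) {
	if banp == nil {
		return
	}
	for k := range anp.peers {
		if extra, ok := banp.peers[k]; ok {
			anp.peers[k] = append(anp.peers[k], extra...)
		}
	}
}

func main() {
	key := "pod:all|ns:kubernetes.io/metadata.name=network-policy-conformance-ravenclaw|port:all"

	anp := &simpleTarget{
		subject: "test=test",
		peers: map[string][]rule{
			key: {
				{policy: "Simple ANP 1", kind: "ANP", verdict: "Allow"},
				{policy: "Simple ANP 2", kind: "ANP", verdict: "Deny"},
			},
		},
	}
	banp := &simpleTarget{
		subject: "test=test",
		peers: map[string][]rule{
			key: {{policy: "Simple BANP", kind: "BANP", verdict: "Allow"}},
		},
	}

	combineCommonPeers(anp, banp)

	for k, rules := range anp.peers {
		fmt.Println("bucket:", k)
		for _, r := range rules {
			fmt.Printf("  %-4s %-13s -> %s\n", r.kind, r.policy, r.verdict)
		}
	}
}

Run as-is it prints the single shared bucket with the two ANP rules followed by the BANP rule, mirroring what the new builder test expects when BuildV1AndV2NetPols folds SimpleBANP into the target built from SimpleANPs.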
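
Continuing with invented names (portKeyer, portProto, portRange, specificPorts), this sketch models how the new GetPrimaryKey methods in portmatcher.go feed that bucket key: each port matcher renders a stable string, and the specific-port matcher concatenates the keys of its members, so rules covering the same ports and protocols land in the same bucket regardless of which policy they came from.

// Illustrative sketch only -- simplified stand-ins for the PortMatcher types.
package main

import "fmt"

type portKeyer interface{ primaryKey() string }

type portProto struct {
	port     int
	protocol string
}

func (p portProto) primaryKey() string {
	return fmt.Sprintf("Port: %d, Protocol: %s; ", p.port, p.protocol)
}

type portRange struct {
	from, to int
	protocol string
}

func (r portRange) primaryKey() string {
	return fmt.Sprintf("From: %d, To: %d, Protocol: %s; ", r.from, r.to, r.protocol)
}

type specificPorts struct {
	ports  []portProto
	ranges []portRange
}

// primaryKey concatenates the member keys, like SpecificPortMatcher.GetPrimaryKey.
func (s specificPorts) primaryKey() string {
	key := "specific ports: "
	for _, p := range s.ports {
		key += p.primaryKey()
	}
	for _, r := range s.ranges {
		key += r.primaryKey()
	}
	return key
}

func sameBucket(a, b portKeyer) bool { return a.primaryKey() == b.primaryKey() }

func main() {
	a := specificPorts{ports: []portProto{{80, "TCP"}, {53, "UDP"}, {9003, "SCTP"}}}
	b := specificPorts{ports: []portProto{{80, "TCP"}, {53, "UDP"}, {9003, "SCTP"}}}
	// Identical port sets yield identical keys, so their rules share a bucket.
	fmt.Println(sameBucket(a, b))
}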
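
Finally, a stand-alone sketch (again with invented names anpGroup and renderANPs) of the grouping the reworked explain.go performs for the action column: the PeerMatcherAdmin entries of a bucket are grouped per ANP, sorted by priority, and exactly one group is tagged Active -- the first group carrying a Pass verdict, or the last group when none does -- while any BANP verdict is appended on its own line after the ANP block.

// Illustrative sketch only -- simplified stand-in for the AnpGroup rendering.
package main

import (
	"fmt"
	"sort"
	"strings"
)

type anpGroup struct {
	name     string
	priority int
	verdicts []string
	hasPass  bool
}

// renderANPs mirrors the loop in TargetsTableLines: sort the groups by priority
// and mark one of them Active (first group with a Pass, otherwise the last one).
func renderANPs(groups []*anpGroup) string {
	sort.Slice(groups, func(i, j int) bool { return groups[i].priority > groups[j].priority })
	lines := []string{"ANP:"}
	passPrinted := false
	for i, g := range groups {
		line := fmt.Sprintf("   pri=%d (%s): %s", g.priority, g.name, strings.Join(g.verdicts, ", "))
		if (g.hasPass && !passPrinted) || (!passPrinted && i == len(groups)-1) {
			line += " - Active"
			passPrinted = true
		}
		lines = append(lines, line)
	}
	return strings.Join(lines, "\n")
}

func main() {
	groups := []*anpGroup{
		{name: "Example ANP", priority: 20, verdicts: []string{"Allow", "Deny", "Pass"}, hasPass: true},
		{name: "Example ANP 2", priority: 16, verdicts: []string{"Allow", "Deny", "Pass"}, hasPass: true},
	}
	fmt.Println(renderANPs(groups))
}

For the two example ANPs above this prints the ANP header followed by "pri=20 (Example ANP): Allow, Deny, Pass - Active" and "pri=16 (Example ANP 2): Allow, Deny, Pass"; in the real code the verdict list comes from an unsorted set, so its order may differ between runs.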