Skip to content

Commit

Permalink
Meltdown protection if all nodes are not ready (#386)
Browse files Browse the repository at this point in the history
Co-authored-by: Kai Kummerer <[email protected]>
  • Loading branch information
maboehm and Kumm-Kai authored Aug 22, 2024
1 parent 0fa8bfc commit c28f6bc
Show file tree
Hide file tree
Showing 3 changed files with 63 additions and 8 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ func (r *NodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.
return ctrl.Result{}, err
}

readyEndpoints := getReadyEndpointsFromNodes(nodes.Items, svc.Spec.IPFamilies)
readyEndpoints := getEndpointsFromNodes(nodes.Items, svc.Spec.IPFamilies)

// update endpoints
if !EqualLoadBalancerEndpoints(loadBalancers.Items[i].Spec.Endpoints, readyEndpoints) {
Expand Down Expand Up @@ -183,20 +183,30 @@ func getLoadBalancerEndpointFromNode(node coreV1.Node, ipFamilies []coreV1.IPFam
return lbEndpoint
}

func getReadyEndpointsFromNodes(
func getEndpointsFromNodes(
nodes []coreV1.Node,
ipFamilies []coreV1.IPFamily,
) []yawolv1beta1.LoadBalancerEndpoint {
// TODO check if ipFamilies and IPFamilyPolicyType is available?
eps := make([]yawolv1beta1.LoadBalancerEndpoint, 0)
readyEPs := make([]yawolv1beta1.LoadBalancerEndpoint, 0)
nonTermintatingEPs := make([]yawolv1beta1.LoadBalancerEndpoint, 0)
for i := range nodes {
if !isNodeReady(nodes[i]) || isNodeTerminating(nodes[i]) {
if isNodeTerminating(nodes[i]) {
continue
}
eps = append(eps, getLoadBalancerEndpointFromNode(nodes[i], ipFamilies))
ep := getLoadBalancerEndpointFromNode(nodes[i], ipFamilies)
nonTermintatingEPs = append(nonTermintatingEPs, ep)
if isNodeReady(nodes[i]) {
readyEPs = append(readyEPs, ep)
}
}

return eps
// In case all nodes are not ready we assume there is something wrong with the health checks (e.g. all kubelets
// cannot reach the control-plane), but the nodes / workloads themselves might still work. So we don't remove all
// endpoints, instead keeping all non-terminating nodes.
if len(readyEPs) == 0 {
return nonTermintatingEPs
}
return readyEPs
}

func isNodeReady(node coreV1.Node) bool {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -417,5 +417,50 @@ var _ = Describe("Check loadbalancer reconcile", Serial, Ordered, func() {
return nil
}, time.Second*15, time.Millisecond*500).Should(Succeed())
})

// Regression test for the meltdown protection: when every node reports
// NotReady, the controller must keep (or restore) the endpoints of all
// non-terminating nodes instead of emptying the endpoint list.
It("should keep endpoints of non-terminating nodes if all nodes are not ready", func() {
	// markNodeNotReady flips the named node's Ready condition to False via a
	// status update, simulating a failing kubelet health check.
	markNodeNotReady := func(name string) {
		GinkgoHelper()
		node := &v1.Node{}
		Expect(k8sClient.Get(ctx, types.NamespacedName{Name: name}, node)).Should(Succeed())
		node.Status.Conditions = []v1.NodeCondition{
			{
				Type:               v1.NodeReady,
				Status:             v1.ConditionFalse,
				LastHeartbeatTime:  metav1.Time{},
				LastTransitionTime: metav1.Time{},
				Reason:             "notready",
				Message:            "notready",
			},
		}
		Expect(k8sClient.Status().Update(ctx, node)).To(Succeed())
	}

	By("marking one node as not ready")
	markNodeNotReady("node2")
	// With one node still ready, only that node should serve as an endpoint.
	assertLBWithOneEndpoint("node1", "10.10.10.10")

	By("marking all nodes as not ready")
	markNodeNotReady("node1")

	// Meltdown protection: with zero ready nodes, both non-terminating nodes
	// should (re)appear in the LB endpoints rather than none.
	Eventually(func(g Gomega) {
		g.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: "default--node-test1", Namespace: "default"}, &lb)).To(Succeed())
		g.Expect(lb.Spec.Endpoints).To(HaveLen(2))
	}, 5*time.Second, 100*time.Millisecond).Should(Succeed(), "expected all nodes to return to the LB endpoints")

	By("marking node1 as terminating")
	// Terminating nodes are excluded even under meltdown protection, so after
	// tainting node1 only node2 should remain.
	node := &v1.Node{}
	Expect(k8sClient.Get(ctx, types.NamespacedName{Name: "node1"}, node)).Should(Succeed())
	node.Spec.Taints = []v1.Taint{
		{
			Key:    ToBeDeletedTaint,
			Effect: v1.TaintEffectNoSchedule,
			Value:  "123456789", // unix timestamp
		},
	}
	Expect(k8sClient.Update(ctx, node)).To(Succeed())

	assertLBWithOneEndpoint("node2", "10.10.10.11")
})
})
})
Original file line number Diff line number Diff line change
Expand Up @@ -352,7 +352,7 @@ func (r *ServiceReconciler) reconcileNodes(
if err := r.TargetClient.List(ctx, &nodes, &client.ListOptions{}); err != nil {
return err
}
nodeEPs := getReadyEndpointsFromNodes(nodes.Items, svc.Spec.IPFamilies)
nodeEPs := getEndpointsFromNodes(nodes.Items, svc.Spec.IPFamilies)

if !EqualLoadBalancerEndpoints(lb.Spec.Endpoints, nodeEPs) {
if err := r.patchLoadBalancerEndpoints(ctx, lb, nodeEPs); err != nil {
Expand Down

0 comments on commit c28f6bc

Please sign in to comment.