36 changes: 34 additions & 2 deletions tests/cluster_update_test.go
@@ -907,7 +907,10 @@ var _ = When("a virtual mode cluster scales down servers", Label("e2e"), Label(u

nginxPod, _ = virtualCluster.NewNginxPod("")
})

It("will scale down server pods", func() {
By("Scaling down cluster")

var cluster v1beta1.Cluster
ctx := context.Background()

@@ -920,6 +923,21 @@ var _ = When("a virtual mode cluster scales down servers", Label("e2e"), Label(u
err = k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())

Eventually(func(g Gomega) {
serverPods := listServerPods(ctx, virtualCluster)

// Wait for all the server pods to be marked for deletion
for _, serverPod := range serverPods {
g.Expect(serverPod.DeletionTimestamp).NotTo(BeNil())
}
}).
MustPassRepeatedly(5).
WithPolling(time.Second * 5).
WithTimeout(time.Minute * 3).
Should(Succeed())

By("Waiting for cluster to be ready again")

Eventually(func(g Gomega) {
// server pods
serverPods := listServerPods(ctx, virtualCluster)
@@ -933,16 +951,30 @@ var _ = When("a virtual mode cluster scales down servers", Label("e2e"), Label(u
k8sEndpointSlices, err := virtualCluster.Client.DiscoveryV1().EndpointSlices("default").Get(ctx, "kubernetes", metav1.GetOptions{})
g.Expect(err).ToNot(HaveOccurred())
g.Expect(len(k8sEndpointSlices.Endpoints)).To(Equal(1))
}).
MustPassRepeatedly(5).
WithPolling(time.Second * 5).
WithTimeout(time.Minute * 2).
Should(Succeed())

By("Checking that Nginx Pod is Running")

Eventually(func(g Gomega) {
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())

_, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
// TODO: there is a possible issue where the Pod is not being marked as Ready
// if the kubelet loses sync with the API server.
// We check for the ContainersReady status (all containers in the pod are ready),
// but this should probably be investigated further.
// Related issue (?): https://github.com/kubernetes/kubernetes/issues/82346

_, cond := pod.GetPodCondition(&nginxPod.Status, v1.ContainersReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 5).
WithTimeout(time.Minute).
Should(Succeed())
})
})
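
For reference, a minimal sketch of the readiness-polling pattern this test now relies on, assuming a Ginkgo/Gomega test context (dot-imported Gomega) and the k8s.io/kubernetes pod helper; the function name waitForPodCondition and the clientset parameter are illustrative and not part of this change.

package tests

import (
	"context"
	"time"

	. "github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/api/v1/pod"
)

// waitForPodCondition polls until the named pod reports the given condition as True.
// MustPassRepeatedly(5) requires five consecutive successful polls, which filters out
// conditions that briefly flap back to False while the cluster settles.
func waitForPodCondition(client *kubernetes.Clientset, namespace, name string, condType v1.PodConditionType) {
	ctx := context.Background()

	Eventually(func(g Gomega) {
		p, err := client.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
		g.Expect(err).NotTo(HaveOccurred())

		_, cond := pod.GetPodCondition(&p.Status, condType)
		g.Expect(cond).NotTo(BeNil())
		g.Expect(cond.Status).To(Equal(v1.ConditionTrue))
	}).
		MustPassRepeatedly(5).
		WithPolling(time.Second * 5).
		WithTimeout(time.Minute * 3).
		Should(Succeed())
}

Requiring several consecutive passes is what distinguishes this wait from a plain Eventually: a single successful check could observe a pod state that is about to change again.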
125 changes: 75 additions & 50 deletions tests/common_test.go
@@ -14,10 +14,11 @@ import (
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/kubectl/pkg/scheme"
"k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/utils/ptr"

corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

"github.com/rancher/k3k/k3k-kubelet/translate"
@@ -87,11 +88,11 @@ func NewVirtualClusters(n int) []*VirtualCluster {
return clusters
}

func NewNamespace() *corev1.Namespace {
func NewNamespace() *v1.Namespace {
GinkgoHelper()

namespace := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-", Labels: map[string]string{"e2e": "true"}}}
namespace, err := k8s.CoreV1().Namespaces().Create(context.Background(), namespace, v1.CreateOptions{})
namespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-", Labels: map[string]string{"e2e": "true"}}}
namespace, err := k8s.CoreV1().Namespaces().Create(context.Background(), namespace, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))

return namespace
Expand Down Expand Up @@ -120,15 +121,15 @@ func deleteNamespace(name string) {

By(fmt.Sprintf("Deleting namespace %s", name))

err := k8s.CoreV1().Namespaces().Delete(context.Background(), name, v1.DeleteOptions{
err := k8s.CoreV1().Namespaces().Delete(context.Background(), name, metav1.DeleteOptions{
GracePeriodSeconds: ptr.To[int64](0),
})
Expect(err).To(Not(HaveOccurred()))
}

func NewCluster(namespace string) *v1beta1.Cluster {
return &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
@@ -154,36 +155,43 @@ func CreateCluster(cluster *v1beta1.Cluster) {
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))

By("Waiting for cluster to be ready")
expectedServers := int(*cluster.Spec.Servers)
expectedAgents := int(*cluster.Spec.Agents)

By(fmt.Sprintf("Waiting for cluster to be ready. Expected servers: %d. Expected agents: %d", expectedServers, expectedAgents))

// track the Eventually status so we only log when it changes
prev := -1

// check that the server Pod and the Kubelet are in Ready state
Eventually(func() bool {
podList, err := k8s.CoreV1().Pods(cluster.Namespace).List(ctx, v1.ListOptions{})
podList, err := k8s.CoreV1().Pods(cluster.Namespace).List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))

// all the servers and agents need to be in a running phase
var serversReady, agentsReady int

for _, pod := range podList.Items {
if pod.Labels["role"] == "server" {
GinkgoLogr.Info(fmt.Sprintf("server pod=%s/%s status=%s", pod.Namespace, pod.Name, pod.Status.Phase))
if pod.Status.Phase == corev1.PodRunning {
serversReady++
}
for _, k3sPod := range podList.Items {
_, cond := pod.GetPodCondition(&k3sPod.Status, v1.PodReady)

// pod not ready
if cond == nil || cond.Status != v1.ConditionTrue {
continue
}

if pod.Labels["type"] == "agent" {
GinkgoLogr.Info(fmt.Sprintf("agent pod=%s/%s status=%s", pod.Namespace, pod.Name, pod.Status.Phase))
if pod.Status.Phase == corev1.PodRunning {
agentsReady++
}
if k3sPod.Labels["role"] == "server" {
serversReady++
}
}

expectedServers := int(*cluster.Spec.Servers)
expectedAgents := int(*cluster.Spec.Agents)
if k3sPod.Labels["type"] == "agent" {
agentsReady++
}
}

By(fmt.Sprintf("serversReady=%d/%d agentsReady=%d/%d", serversReady, expectedServers, agentsReady, expectedAgents))
if prev != (serversReady + agentsReady) {
GinkgoLogr.Info("Waiting for pods to be Ready", "servers", serversReady, "agents", agentsReady, "time", time.Now().Format(time.DateTime))
prev = (serversReady + agentsReady)
}

// the server pods should equal the expected servers, but since in shared mode we also have the kubelet, it is fine to have more than one
if (serversReady != expectedServers) || (agentsReady < expectedAgents) {
@@ -193,8 +201,10 @@ func CreateCluster(cluster *v1beta1.Cluster) {
return true
}).
WithTimeout(time.Minute * 5).
WithPolling(time.Second * 5).
WithPolling(time.Second * 10).
Should(BeTrue())

By("Cluster is ready")
}

// NewVirtualK8sClient returns a Kubernetes ClientSet for the virtual cluster
@@ -236,41 +246,60 @@ func NewVirtualK8sClientAndConfig(cluster *v1beta1.Cluster) (*kubernetes.Clients
return virtualK8sClient, restcfg
}

func (c *VirtualCluster) NewNginxPod(namespace string) (*corev1.Pod, string) {
func (c *VirtualCluster) NewNginxPod(namespace string) (*v1.Pod, string) {
GinkgoHelper()

if namespace == "" {
namespace = "default"
}

nginxPod := &corev1.Pod{
ObjectMeta: v1.ObjectMeta{
nginxPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "nginx-",
Namespace: namespace,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "nginx",
Image: "nginx",
}},
},
}

By("Creating Pod")
By("Creating Nginx Pod and waiting for it to be Ready")

ctx := context.Background()
nginxPod, err := c.Client.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, v1.CreateOptions{})

var err error

nginxPod, err = c.Client.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))

var podIP string
// check that the nginx Pod is up and running in the virtual cluster
Eventually(func(g Gomega) {
nginxPod, err = c.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(Not(HaveOccurred()))

_, cond := pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(Succeed())

By(fmt.Sprintf("Nginx Pod is running (%s/%s)", nginxPod.Namespace, nginxPod.Name))

// only check the pod on the host cluster if the cluster is in shared mode
if c.Cluster.Spec.Mode != v1beta1.SharedClusterMode {
return nginxPod, ""
}

var podIP string

// check that the nginx Pod is up and running in the host cluster
Eventually(func() bool {
podList, err := k8s.CoreV1().Pods(c.Cluster.Namespace).List(ctx, v1.ListOptions{})
podList, err := k8s.CoreV1().Pods(c.Cluster.Namespace).List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))

for _, pod := range podList.Items {
@@ -285,7 +314,7 @@ func (c *VirtualCluster) NewNginxPod(namespace string) (*corev1.Pod, string) {
pod.Name, resourceNamespace, resourceName, pod.Status.Phase, podIP,
)

return pod.Status.Phase == corev1.PodRunning && podIP != ""
return pod.Status.Phase == v1.PodRunning && podIP != ""
}
}

@@ -295,16 +324,12 @@ func (c *VirtualCluster) NewNginxPod(namespace string) (*corev1.Pod, string) {
WithPolling(time.Second * 5).
Should(BeTrue())

// get the running pod from the virtual cluster
nginxPod, err = c.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, v1.GetOptions{})
Expect(err).To(Not(HaveOccurred()))

return nginxPod, podIP
}

// ExecCmd executes a command on the specified pod and waits for the command's output.
func (c *VirtualCluster) ExecCmd(pod *corev1.Pod, command string) (string, string, error) {
option := &corev1.PodExecOptions{
func (c *VirtualCluster) ExecCmd(pod *v1.Pod, command string) (string, string, error) {
option := &v1.PodExecOptions{
Command: []string{"sh", "-c", command},
Stdout: true,
Stderr: true,
Expand Down Expand Up @@ -336,48 +361,48 @@ func restartServerPod(ctx context.Context, virtualCluster *VirtualCluster) {
GinkgoHelper()

labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))

Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]

GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)

err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, metav1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))

By("Deleting server pod")

// check that the server pods restarted
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
}).WithTimeout(60 * time.Second).WithPolling(time.Second * 5).Should(BeNil())
}

func listServerPods(ctx context.Context, virtualCluster *VirtualCluster) []corev1.Pod {
func listServerPods(ctx context.Context, virtualCluster *VirtualCluster) []v1.Pod {
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"

serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))

return serverPods.Items
}

func listAgentPods(ctx context.Context, virtualCluster *VirtualCluster) []corev1.Pod {
func listAgentPods(ctx context.Context, virtualCluster *VirtualCluster) []v1.Pod {
labelSelector := fmt.Sprintf("cluster=%s,type=agent,mode=%s", virtualCluster.Cluster.Name, virtualCluster.Cluster.Spec.Mode)

agentPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
agentPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))

return agentPods.Items
}

// getEnv will get an environment variable from a pod; it will return an empty string if not found
func getEnv(pod *corev1.Pod, envName string) (string, bool) {
func getEnv(pod *v1.Pod, envName string) (string, bool) {
container := pod.Spec.Containers[0]
for _, envVar := range container.Env {
if envVar.Name == envName {
@@ -389,7 +414,7 @@ func getEnv(pod *corev1.Pod, envName string) (string, bool) {
}

// isArgFound will return true if the argument passed to the function is found in container args
func isArgFound(pod *corev1.Pod, arg string) bool {
func isArgFound(pod *v1.Pod, arg string) bool {
container := pod.Spec.Containers[0]
for _, cmd := range container.Command {
if strings.Contains(cmd, arg) {
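
As a companion to the readiness loop in CreateCluster above, here is a minimal sketch of counting Ready pods by label with the same pod.GetPodCondition helper; the function countReadyPodsByLabel and its parameters are illustrative and not part of this change.

package tests

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/api/v1/pod"
)

// countReadyPodsByLabel lists the pods in a namespace and counts those whose
// PodReady condition is True and whose labels carry the given key/value pair.
func countReadyPodsByLabel(ctx context.Context, client *kubernetes.Clientset, namespace, labelKey, labelValue string) (int, error) {
	podList, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
		return 0, err
	}

	ready := 0

	for _, p := range podList.Items {
		// skip pods that have not reached the Ready condition yet
		_, cond := pod.GetPodCondition(&p.Status, v1.PodReady)
		if cond == nil || cond.Status != v1.ConditionTrue {
			continue
		}

		if p.Labels[labelKey] == labelValue {
			ready++
		}
	}

	return ready, nil
}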