3 changes: 3 additions & 0 deletions Makefile
@@ -108,6 +108,9 @@ validate: generate docs fmt ## Validate the project checking for any dependency
.PHONY: install
install: ## Install K3k with Helm on the targeted Kubernetes cluster
helm upgrade --install --namespace k3k-system --create-namespace \
--set controller.extraEnv[0].name=LOG_FORMAT \
--set controller.extraEnv[0].value=console \
--set controller.extraEnv[1].name=DEBUG \
--set controller.image.repository=$(REPO)/k3k \
--set controller.image.tag=$(VERSION) \
--set agent.shared.image.repository=$(REPO)/k3k-kubelet \
9 changes: 5 additions & 4 deletions pkg/controller/cluster/agent/agent.go
@@ -42,23 +42,24 @@ func configSecretName(clusterName string) string {
}

func ensureObject(ctx context.Context, cfg *Config, obj ctrlruntimeclient.Object) error {
log := ctrl.LoggerFrom(ctx)

key := ctrlruntimeclient.ObjectKeyFromObject(obj)

log.Info(fmt.Sprintf("ensuring %T", obj), "key", key)
log := ctrl.LoggerFrom(ctx).WithValues("key", key)

if err := controllerutil.SetControllerReference(cfg.cluster, obj, cfg.scheme); err != nil {
return err
}

if err := cfg.client.Create(ctx, obj); err != nil {
if apierrors.IsAlreadyExists(err) {
log.V(1).Info(fmt.Sprintf("Resource %T already exists, updating.", obj))

return cfg.client.Update(ctx, obj)
}

return err
}

log.V(1).Info(fmt.Sprintf("Creating %T.", obj))

return nil
}
52 changes: 29 additions & 23 deletions pkg/controller/cluster/cluster.go
@@ -161,10 +161,8 @@ func namespaceEventHandler(r *ClusterReconciler) handler.Funcs {
}

func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", req.NamespacedName)
ctx = ctrl.LoggerInto(ctx, log) // enrich the current logger

log.Info("reconciling cluster")
log := ctrl.LoggerFrom(ctx)
log.Info("Reconciling Cluster")

var cluster v1beta1.Cluster
if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
@@ -178,6 +176,8 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request

// Set initial status if not already set
if cluster.Status.Phase == "" || cluster.Status.Phase == v1beta1.ClusterUnknown {
log.V(1).Info("Updating Cluster status phase")

cluster.Status.Phase = v1beta1.ClusterProvisioning
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
@@ -195,6 +195,8 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request

// add finalizer
if controllerutil.AddFinalizer(&cluster, clusterFinalizerName) {
log.V(1).Info("Updating Cluster adding finalizer")

if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
@@ -207,6 +209,8 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
reconcilerErr := c.reconcileCluster(ctx, &cluster)

if !equality.Semantic.DeepEqual(orig.Status, cluster.Status) {
log.Info("Updating Cluster status")

if err := c.Client.Status().Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
@@ -215,7 +219,7 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
// if there was an error during the reconciliation, return
if reconcilerErr != nil {
if errors.Is(reconcilerErr, bootstrap.ErrServerNotReady) {
log.Info("server not ready, requeueing")
log.V(1).Info("Server not ready, requeueing")
return reconcile.Result{RequeueAfter: time.Second * 10}, nil
}

@@ -224,6 +228,8 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request

// update Cluster if needed
if !equality.Semantic.DeepEqual(orig.Spec, cluster.Spec) {
log.Info("Updating Cluster")

if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
@@ -234,7 +240,7 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request

func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1beta1.Cluster) error {
err := c.reconcile(ctx, cluster)
c.updateStatus(cluster, err)
c.updateStatus(ctx, cluster, err)

return err
}
@@ -264,7 +270,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Clus
// if the Version is not specified we will try to use the same Kubernetes version of the host.
// This version is stored in the Status object, and it will not be updated if already set.
if cluster.Spec.Version == "" && cluster.Status.HostVersion == "" {
log.Info("cluster version not set")
log.V(1).Info("Cluster version not set. Using host version.")

hostVersion, err := c.DiscoveryClient.ServerVersion()
if err != nil {
@@ -295,7 +301,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Clus
if cluster.Status.ServiceCIDR == "" {
// in shared mode try to lookup the serviceCIDR
if cluster.Spec.Mode == v1beta1.SharedClusterMode {
log.Info("looking up Service CIDR for shared mode")
log.V(1).Info("Looking up Service CIDR for shared mode")

cluster.Status.ServiceCIDR, err = c.lookupServiceCIDR(ctx)
if err != nil {
@@ -307,7 +313,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Clus

// in virtual mode assign a default serviceCIDR
if cluster.Spec.Mode == v1beta1.VirtualClusterMode {
log.Info("assign default service CIDR for virtual mode")
log.V(1).Info("assign default service CIDR for virtual mode")

cluster.Status.ServiceCIDR = defaultVirtualServiceCIDR
}
@@ -354,7 +360,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Clus
// ensureBootstrapSecret will create or update the Secret containing the bootstrap data from the k3s server
func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *v1beta1.Cluster, serviceIP, token string) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring bootstrap secret")
log.V(1).Info("Ensuring bootstrap secret")

bootstrapData, err := bootstrap.GenerateBootstrapData(ctx, cluster, serviceIP, token)
if err != nil {
@@ -386,7 +392,7 @@ func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *
// ensureKubeconfigSecret will create or update the Secret containing the kubeconfig data from the k3s server
func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster *v1beta1.Cluster, serviceIP string, port int) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring kubeconfig secret")
log.V(1).Info("Ensuring Kubeconfig Secret")

adminKubeconfig := kubeconfig.New()

@@ -460,7 +466,7 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v

func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring network policy")
log.V(1).Info("Ensuring network policy")

networkPolicyName := controller.SafeConcatNameWithPrefix(cluster.Name)

@@ -544,15 +550,15 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1

key := client.ObjectKeyFromObject(currentNetworkPolicy)
if result != controllerutil.OperationResultNone {
log.Info("cluster network policy updated", "key", key, "result", result)
log.V(1).Info("Cluster network policy updated", "key", key, "result", result)
}

return nil
}

func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v1beta1.Cluster) (*v1.Service, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring cluster service")
log.V(1).Info("Ensuring Cluster Service")

expectedService := server.Service(cluster)
currentService := expectedService.DeepCopy()
@@ -572,15 +578,15 @@ func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v

key := client.ObjectKeyFromObject(currentService)
if result != controllerutil.OperationResultNone {
log.Info("cluster service updated", "key", key, "result", result)
log.V(1).Info("Cluster service updated", "key", key, "result", result)
}

return currentService, nil
}

func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring cluster ingress")
log.V(1).Info("Ensuring cluster ingress")

expectedServerIngress := server.Ingress(ctx, cluster)

@@ -608,7 +614,7 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1beta1.

key := client.ObjectKeyFromObject(currentServerIngress)
if result != controllerutil.OperationResultNone {
log.Info("cluster ingress updated", "key", key, "result", result)
log.V(1).Info("Cluster ingress updated", "key", key, "result", result)
}

return nil
@@ -650,7 +656,7 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1beta1.Cluster

if result != controllerutil.OperationResultNone {
key := client.ObjectKeyFromObject(currentServerStatefulSet)
log.Info("ensuring serverStatefulSet", "key", key, "result", result)
log.V(1).Info("Ensuring server StatefulSet", "key", key, "result", result)
}

return err
@@ -753,7 +759,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
// Try to look for the serviceCIDR creating a failing service.
// The error should contain the expected serviceCIDR

log.Info("looking up serviceCIDR from a failing service creation")
log.V(1).Info("Looking up Service CIDR from a failing service creation")

failingSvc := v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: "fail", Namespace: "default"},
@@ -765,7 +771,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro

if len(splittedErrMsg) > 1 {
serviceCIDR := strings.TrimSpace(splittedErrMsg[1])
log.Info("found serviceCIDR from failing service creation: " + serviceCIDR)
log.V(1).Info("Found Service CIDR from failing service creation: " + serviceCIDR)

// validate serviceCIDR
_, serviceCIDRAddr, err := net.ParseCIDR(serviceCIDR)
@@ -779,7 +785,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro

// Try to look for the kube-apiserver Pod, and look for the '--service-cluster-ip-range' flag.

log.Info("looking up serviceCIDR from kube-apiserver pod")
log.V(1).Info("Looking up Service CIDR from kube-apiserver pod")

matchingLabels := client.MatchingLabels(map[string]string{
"component": "kube-apiserver",
@@ -802,12 +808,12 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
for _, arg := range apiServerArgs {
if strings.HasPrefix(arg, "--service-cluster-ip-range=") {
serviceCIDR := strings.TrimPrefix(arg, "--service-cluster-ip-range=")
log.Info("found serviceCIDR from kube-apiserver pod: " + serviceCIDR)
log.V(1).Info("Found Service CIDR from kube-apiserver pod: " + serviceCIDR)

// validate serviceCIDR
_, serviceCIDRAddr, err := net.ParseCIDR(serviceCIDR)
if err != nil {
log.Error(err, "serviceCIDR is not valid")
log.Error(err, "Service CIDR is not valid")
break
}

9 changes: 7 additions & 2 deletions pkg/controller/cluster/cluster_finalize.go
@@ -23,7 +23,7 @@ import (

func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1beta1.Cluster) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("finalizing Cluster")
log.V(1).Info("Deleting Cluster")

// Set the Terminating phase and condition
cluster.Status.Phase = v1beta1.ClusterTerminating
@@ -40,7 +40,7 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1beta

// Deallocate ports for kubelet and webhook if used
if cluster.Spec.Mode == v1beta1.SharedClusterMode && cluster.Spec.MirrorHostNodes {
log.Info("dellocating ports for kubelet and webhook")
log.V(1).Info("dellocating ports for kubelet and webhook")

if err := c.PortAllocator.DeallocateKubeletPort(ctx, cluster.Name, cluster.Namespace, cluster.Status.KubeletPort); err != nil {
return reconcile.Result{}, err
@@ -53,6 +53,8 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1beta

// Remove finalizer from the cluster and update it only when all resources are cleaned up
if controllerutil.RemoveFinalizer(cluster, clusterFinalizerName) {
log.Info("Deleting Cluster removing finalizer")

if err := c.Client.Update(ctx, cluster); err != nil {
return reconcile.Result{}, err
}
@@ -62,6 +64,9 @@ }
}

func (c *ClusterReconciler) unbindClusterRoles(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.V(1).Info("Unbinding ClusterRoles")

clusterRoles := []string{"k3k-kubelet-node", "k3k-priorityclass"}

var err error
9 changes: 3 additions & 6 deletions pkg/controller/cluster/pod.go
@@ -9,7 +9,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"

v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -44,14 +43,10 @@ func AddPodController(ctx context.Context, mgr manager.Manager, maxConcurrentRec

func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling pod")
log.V(1).Info("Reconciling Pod")

var pod v1.Pod
if err := r.Client.Get(ctx, req.NamespacedName, &pod); err != nil {
if !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}

return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}

@@ -74,6 +69,8 @@ func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
},
}

log.V(1).Info("Deleting Virtual Pod", "name", virtName, "namespace", virtNamespace)

return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(virtualClient.Delete(ctx, &virtPod))
}

7 changes: 5 additions & 2 deletions pkg/controller/cluster/service.go
@@ -39,7 +39,7 @@ func AddServiceController(ctx context.Context, mgr manager.Manager, maxConcurren

func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring service status to virtual cluster")
log.V(1).Info("Reconciling Service")

var hostService v1.Service
if err := r.HostClient.Get(ctx, req.NamespacedName, &hostService); err != nil {
@@ -53,7 +53,7 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
virtualServiceNamespace, virtualServiceNamespaceFound := hostService.Annotations[translate.ResourceNamespaceAnnotation]

if !virtualServiceNameFound || !virtualServiceNamespaceFound {
log.V(1).Info(fmt.Sprintf("service %s/%s does not have virtual service annotations, skipping", hostService.Namespace, hostService.Name))
log.V(1).Info(fmt.Sprintf("Service %s/%s does not have virtual service annotations, skipping", hostService.Namespace, hostService.Name))
return reconcile.Result{}, nil
}

@@ -80,7 +80,10 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
}

if !equality.Semantic.DeepEqual(virtualService.Status.LoadBalancer, hostService.Status.LoadBalancer) {
log.V(1).Info("Updating Virtual Service Status", "name", virtualServiceName, "namespace", virtualServiceNamespace)

virtualService.Status.LoadBalancer = hostService.Status.LoadBalancer

if err := virtualClient.Status().Update(ctx, &virtualService); err != nil {
return reconcile.Result{}, err
}