Skip to content
138 changes: 138 additions & 0 deletions internal/controller/config_controller.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
"context"
"time"

"github.com/giantswarm/microerror"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
capi "sigs.k8s.io/cluster-api/api/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/predicate"

"github.com/giantswarm/teleport-operator/internal/pkg/config"
"github.com/giantswarm/teleport-operator/internal/pkg/key"
"github.com/giantswarm/teleport-operator/internal/pkg/teleport"
)

// ConfigReconciler reconciles changes to the teleport-operator ConfigMap.
// On every change it re-parses the operator configuration and forces all
// CAPI Clusters to be reconciled so the new settings take effect immediately.
type ConfigReconciler struct {
	Client client.Client // cached client from the manager; reads ConfigMaps, lists/updates Clusters
	Log logr.Logger // base logger; Reconcile adds the configmap name/namespace
	Scheme *runtime.Scheme
	Teleport *teleport.Teleport // shared Teleport instance whose Config is swapped on change
	Namespace string // namespace the operator (and its ConfigMap) lives in
}

//+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch
//+kubebuilder:rbac:groups="",resources=configmaps/status,verbs=get

// Reconcile handles create/update/delete events for the teleport-operator
// ConfigMap: it re-parses the operator configuration, swaps it into the shared
// Teleport instance, and triggers immediate reconciliation of all clusters.
// It returns an empty Result (no requeue) on success and on ConfigMap deletion.
func (r *ConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := r.Log.WithValues("configmap", req.NamespacedName)

	// Fetch the ConfigMap first so deletion can be told apart from an update.
	configMap := &corev1.ConfigMap{}
	if err := r.Client.Get(ctx, req.NamespacedName, configMap); err != nil {
		if apierrors.IsNotFound(err) {
			// Deleted: keep running with the last successfully parsed config.
			log.Info("ConfigMap deleted, operator will continue with existing configuration")
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, microerror.Mask(err)
	}

	log.Info("ConfigMap change detected, triggering cluster reconciliation")

	// Parse the new configuration. Note this re-reads the ConfigMap by name
	// from r.Namespace rather than using the object fetched above.
	newConfig, err := config.GetConfigFromConfigMap(ctx, r.Client, r.Namespace)
	if err != nil {
		log.Error(err, "Failed to parse new configuration from ConfigMap")
		return ctrl.Result{}, microerror.Mask(err)
	}

	// Swap the new configuration into the shared Teleport instance.
	// NOTE(review): this write is unsynchronized; if other reconcilers read
	// r.Teleport.Config concurrently this is a data race — confirm whether
	// the Teleport type needs a mutex or an atomic pointer here.
	r.Teleport.Config = newConfig

	// Trigger immediate reconciliation of all clusters so the change is not
	// delayed until their next scheduled resync.
	if err := r.triggerClusterReconciliation(ctx, log, "ConfigMap updated"); err != nil {
		return ctrl.Result{}, microerror.Mask(err)
	}

	log.Info("Successfully processed ConfigMap change")
	return ctrl.Result{}, nil
}

// triggerClusterReconciliation forces immediate reconciliation of every CAPI
// Cluster by stamping a timestamp annotation on it. Annotation failures on
// individual clusters are logged and skipped (best-effort): an un-annotated
// cluster still picks up the new config on its next scheduled resync.
// reason is included in the log line for operator visibility only.
func (r *ConfigReconciler) triggerClusterReconciliation(ctx context.Context, log logr.Logger, reason string) error {
	// List all clusters across namespaces.
	clusterList := &capi.ClusterList{}
	if err := r.Client.List(ctx, clusterList); err != nil {
		return microerror.Mask(err)
	}

	log.Info("Triggering immediate reconciliation for all clusters",
		"clusterCount", len(clusterList.Items),
		"reason", reason)

	// A changing annotation value is what generates an update event; RFC3339
	// gives second granularity, which is plenty for config changes.
	timestamp := time.Now().Format(time.RFC3339)
	failed := 0

	for i := range clusterList.Items {
		cluster := &clusterList.Items[i]

		// Patch instead of Update: a merge patch only touches the annotation,
		// so it cannot fail with an optimistic-concurrency (resourceVersion)
		// conflict when another controller updates the Cluster concurrently.
		base := cluster.DeepCopy()

		if cluster.Annotations == nil {
			cluster.Annotations = make(map[string]string)
		}
		cluster.Annotations[key.ConfigUpdateAnnotation] = timestamp

		if err := r.Client.Patch(ctx, cluster, client.MergeFrom(base)); err != nil {
			log.Error(err, "Failed to patch cluster to trigger reconciliation",
				"cluster", cluster.Name, "namespace", cluster.Namespace)
			// Continue with other clusters even if one fails.
			failed++
			continue
		}

		log.V(1).Info("Triggered reconciliation for cluster",
			"cluster", cluster.Name, "namespace", cluster.Namespace)
	}

	if failed > 0 {
		log.Info("Some clusters could not be annotated; they will pick up the new configuration on their next scheduled reconciliation",
			"failedCount", failed)
	}

	return nil
}

// SetupWithManager registers the controller with the Manager, watching
// ConfigMaps but filtering events down to the single teleport-operator
// ConfigMap in the operator's own namespace.
func (r *ConfigReconciler) SetupWithManager(mgr ctrl.Manager) error {
	// Only events for the teleport-operator ConfigMap in our namespace matter.
	isOperatorConfig := predicate.NewPredicateFuncs(func(object client.Object) bool {
		cm, ok := object.(*corev1.ConfigMap)
		if !ok {
			return false
		}
		return cm.Namespace == r.Namespace && cm.Name == key.TeleportOperatorConfigName
	})

	return ctrl.NewControllerManagedBy(mgr).
		For(&corev1.ConfigMap{}).
		WithEventFilter(isOperatorConfig).
		Complete(r)
}
190 changes: 190 additions & 0 deletions internal/controller/config_controller_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,190 @@
/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
"context"
"testing"

"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
capi "sigs.k8s.io/cluster-api/api/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

"github.com/giantswarm/teleport-operator/internal/pkg/key"
"github.com/giantswarm/teleport-operator/internal/pkg/teleport"
)

// TestConfigReconciler_Reconcile exercises the ConfigMap reconciler against a
// fake client: a config change must stamp the config-update annotation on
// every Cluster; a deleted ConfigMap must be a no-op.
func TestConfigReconciler_Reconcile(t *testing.T) {
	scheme := runtime.NewScheme()
	_ = corev1.AddToScheme(scheme)
	_ = capi.AddToScheme(scheme)

	testCases := []struct {
		name            string
		configMapName   string
		configMapData   map[string]string
		configMapExists bool
		expectError     bool
		expectRequeue   bool
	}{
		{
			name:          "ConfigMap updated - triggers reconciliation",
			configMapName: key.TeleportOperatorConfigName,
			configMapData: map[string]string{
				"proxyAddr":             "proxy.example.com:443",
				"teleportVersion":       "17.0.0",
				"managementClusterName": "management",
				"appName":               "teleport-kube-agent",
				"appVersion":            "0.12.0",
				"appCatalog":            "giantswarm",
			},
			configMapExists: true,
			expectError:     false,
			expectRequeue:   false,
		},
		{
			name:            "ConfigMap deleted - no action needed",
			configMapName:   key.TeleportOperatorConfigName,
			configMapExists: false,
			expectError:     false,
			expectRequeue:   false,
		},
		{
			name:          "Different ConfigMap - still triggers reconciliation",
			configMapName: "other-configmap",
			configMapData: map[string]string{
				"somedata": "value",
			},
			configMapExists: true,
			expectError:     false,
			expectRequeue:   false,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			objects := []runtime.Object{}

			// A cluster to observe the reconciliation-trigger annotation on.
			testCluster := &capi.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-cluster",
					Namespace: "default",
				},
			}
			objects = append(objects, testCluster)

			// The ConfigMap named in the reconcile request, if it should exist.
			if tc.configMapExists {
				objects = append(objects, &corev1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{
						Name:      tc.configMapName,
						Namespace: "test-namespace",
					},
					Data: tc.configMapData,
				})
			}

			// Reconcile parses the operator config from the teleport-operator
			// ConfigMap, so it must exist whenever the request targets some
			// other ConfigMap. (When the request targets the operator ConfigMap
			// itself, its presence is already controlled by tc.configMapExists
			// above — building a second copy here would be dead code.)
			if tc.configMapName != key.TeleportOperatorConfigName {
				objects = append(objects, &corev1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{
						Name:      key.TeleportOperatorConfigName,
						Namespace: "test-namespace",
					},
					Data: map[string]string{
						"proxyAddr":             "proxy.example.com:443",
						"teleportVersion":       "17.0.0",
						"managementClusterName": "management",
						"appName":               "teleport-kube-agent",
						"appVersion":            "0.12.0",
						"appCatalog":            "giantswarm",
					},
				})
			}

			// k8sClient avoids shadowing the conventional controller-runtime
			// "client" package name.
			k8sClient := fake.NewClientBuilder().
				WithScheme(scheme).
				WithRuntimeObjects(objects...).
				Build()

			reconciler := &ConfigReconciler{
				Client:    k8sClient,
				Log:       logr.Discard(),
				Scheme:    scheme,
				Teleport:  &teleport.Teleport{},
				Namespace: "test-namespace",
			}

			req := ctrl.Request{
				NamespacedName: types.NamespacedName{
					Name:      tc.configMapName,
					Namespace: "test-namespace",
				},
			}

			// Execute reconcile.
			result, err := reconciler.Reconcile(context.Background(), req)

			// Verify error expectations.
			if tc.expectError && err == nil {
				t.Error("Expected error but got none")
			}
			if !tc.expectError && err != nil {
				t.Errorf("Unexpected error: %v", err)
			}

			// Verify requeue expectations.
			if tc.expectRequeue && result.Requeue == false {
				t.Error("Expected requeue but got none")
			}
			if !tc.expectRequeue && result.Requeue == true {
				t.Error("Expected no requeue but got requeue")
			}

			// For successful ConfigMap updates, verify the cluster annotation
			// was added. Any existing ConfigMap triggers reconciliation here,
			// not just the teleport-operator one, because predicate filtering
			// happens at the manager level, not inside Reconcile.
			if tc.expectError == false && tc.configMapExists {
				updatedCluster := &capi.Cluster{}
				err := k8sClient.Get(context.Background(), types.NamespacedName{
					Name:      "test-cluster",
					Namespace: "default",
				}, updatedCluster)

				if err != nil {
					t.Errorf("Failed to get updated cluster: %v", err)
				} else if updatedCluster.Annotations == nil || updatedCluster.Annotations[key.ConfigUpdateAnnotation] == "" {
					t.Error("Expected config update annotation on cluster but found none")
				}
			}
		})
	}
}
1 change: 1 addition & 0 deletions internal/pkg/key/key.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ const (
TeleportAppTokenValidity = 720 * time.Hour
TeleportKubeTokenValidity = 720 * time.Hour
TeleportNodeTokenValidity = 720 * time.Hour
ConfigUpdateAnnotation = "teleport-operator.giantswarm.io/config-updated"

AppCatalog = "appCatalog"
AppName = "appName"
Expand Down
12 changes: 12 additions & 0 deletions main.go
Original file line number Diff line number Diff line change
Expand Up @@ -135,6 +135,18 @@ func main() {
setupLog.Error(err, "unable to create controller", "controller", "Cluster")
os.Exit(1)
}

// Setup ConfigMap controller to watch for teleport-operator config changes
if err = (&controller.ConfigReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("Config"),
Scheme: mgr.GetScheme(),
Teleport: tele,
Namespace: namespace,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Config")
os.Exit(1)
}
//+kubebuilder:scaffold:builder

if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
Expand Down