Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion api/gorch/v1alpha1/guardrailsorchestrator_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,9 @@ type GuardrailsOrchestratorSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
// Number of replicas
Replicas int32 `json:"replicas"`
// +kubebuilder:validation:Minimum=1
// +kubebuilder:default=1
Replicas int32 `json:"replicas,omitempty"`
// Name of configmap containing generator, detector, and chunker arguments
// +optional
OrchestratorConfig *string `json:"orchestratorConfig,omitempty"`
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -216,20 +216,20 @@ spec:
type: string
type: object
replicas:
default: 1
description: |-
INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
Important: Run "make" to regenerate code after modifying this file
Number of replicas
format: int32
minimum: 1
type: integer
tlsSecrets:
description: Define TLS secrets to be mounted to the orchestrator.
Secrets will be mounted at /etc/tls/$SECRET_NAME
items:
type: string
type: array
required:
- replicas
type: object
status:
properties:
Expand Down
72 changes: 58 additions & 14 deletions controllers/gorch/guardrailsorchestrator_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,9 @@ package gorch
import (
"context"
"fmt"
"strconv"
"time"

"github.com/go-logr/logr"
kservev1beta1 "github.com/kserve/kserve/pkg/apis/serving/v1beta1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
Expand All @@ -28,8 +31,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"strconv"
"time"

gorchv1alpha1 "github.com/trustyai-explainability/trustyai-service-operator/api/gorch/v1alpha1"
appsv1 "k8s.io/api/apps/v1"
Expand Down Expand Up @@ -113,6 +114,23 @@ func (r *GuardrailsOrchestratorReconciler) handleReconciliationErrorWithTrace(ct
})
}

// appendTLSSecretsToMounts appends TLS secrets from the orchestrator spec to the tlsMounts
// slice, one DetectedService per secret. Only the TLSSecret field is populated; the other
// fields are left at their zero values (empty strings). Returns the (possibly grown) slice.
func appendTLSSecretsToMounts(orchestrator *gorchv1alpha1.GuardrailsOrchestrator, tlsMounts []gorchv1alpha1.DetectedService) []gorchv1alpha1.DetectedService {
	// Only the nil check is needed to guard the dereference; ranging over an
	// empty slice is a no-op, so a separate length check would be redundant.
	if orchestrator.Spec.TLSSecrets != nil {
		for _, tlsSecret := range *orchestrator.Spec.TLSSecrets {
			// Zero-valued fields are implicit; only the secret name matters here.
			tlsMounts = append(tlsMounts, gorchv1alpha1.DetectedService{TLSSecret: tlsSecret})
		}
	}
	return tlsMounts
}

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
Expand Down Expand Up @@ -281,18 +299,7 @@ func (r *GuardrailsOrchestratorReconciler) Reconcile(ctx context.Context, req ct
return ctrl.Result{}, err
}

if orchestrator.Spec.TLSSecrets != nil && len(*orchestrator.Spec.TLSSecrets) > 0 {
for _, tlsSecret := range *orchestrator.Spec.TLSSecrets {
tlsMounts = append(tlsMounts, gorchv1alpha1.DetectedService{
Name: "",
Type: "",
Scheme: "",
Hostname: "",
Port: "",
TLSSecret: tlsSecret,
})
}
}
tlsMounts = appendTLSSecretsToMounts(orchestrator, tlsMounts)

// add TLS mounts to deployment
err = r.addTLSMounts(ctx, orchestrator, deployment, tlsMounts)
Expand Down Expand Up @@ -323,6 +330,43 @@ func (r *GuardrailsOrchestratorReconciler) Reconcile(ctx context.Context, req ct
} else if err != nil {
r.handleReconciliationError(ctx, log, orchestrator, err, utils.ReconcileFailed, "Failed to get Deployment")
return ctrl.Result{}, err
} else {
// Deployment exists, check if it needs to be updated
newDeployment, err := r.createDeployment(ctx, orchestrator)
if err != nil {
r.handleReconciliationError(ctx, log, orchestrator, err, utils.ReconcileFailed, "Failed to create updated Deployment spec")
return ctrl.Result{}, err
}

tlsMounts = appendTLSSecretsToMounts(orchestrator, tlsMounts)

// add TLS mounts to the new deployment spec
err = r.addTLSMounts(ctx, orchestrator, newDeployment, tlsMounts)
if err != nil {
if errors.IsNotFound(err) {
log.Info("Could not find required TLS serving secrets, will try again.")
return ctrl.Result{}, nil
}
r.handleReconciliationError(ctx, log, orchestrator, err, utils.ReconcileFailed, "Failed to add TLS Mounts")
return ctrl.Result{}, err
}
Comment thread
coderabbitai[bot] marked this conversation as resolved.

// Preserve existing annotations and add configmap hashes
annotations := existingDeployment.Spec.Template.Annotations
if annotations == nil {
annotations = map[string]string{}
}
r.setConfigMapHashAnnotations(ctx, orchestrator, annotations)
newDeployment.Spec.Template.Annotations = annotations

// Patch the deployment if there are changes
if patchDeployment(existingDeployment, newDeployment) {
log.Info("Updating Deployment", "Deployment.Namespace", existingDeployment.Namespace, "Deployment.Name", existingDeployment.Name)
if updateErr := r.Update(ctx, existingDeployment); updateErr != nil {
r.handleReconciliationError(ctx, log, orchestrator, updateErr, utils.ReconcileFailed, "Failed to update Deployment")
return ctrl.Result{}, updateErr
}
}
}

// monitor the orchestrator or gateway config for changes
Expand Down
173 changes: 172 additions & 1 deletion controllers/gorch/guardrailsorchestrator_controller_test.go
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

what do you think about adding some additional tests here, e.g.:

  • test default behavior (not specifying replicas)
  • Invalid value (replicas: 0)
  • scaling existing deployment (update from 1 to 3)

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Currently the controller doesn't update the deployment when the CR changes (eg. scaling replicas) but I think it's useful to have so I added the following lines: https://github.com/trustyai-explainability/trustyai-service-operator/pull/623/changes#diff-ef06797c0d315f0614d1c745b355ae0cf01b2ab5f621d0f8820a720c7bed527cR327-R361

Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,6 @@ func createGuardrailsOrchestrator(ctx context.Context, orchestratorConfigMap str
Namespace: typedNamespacedName.Namespace,
},
Spec: gorchv1alpha1.GuardrailsOrchestratorSpec{
Replicas: 1,
OrchestratorConfig: &orchestratorConfigMap,
},
}
Expand Down Expand Up @@ -105,6 +104,25 @@ func createGuardrailsOrchestratorOtelExporter(ctx context.Context, orchestratorC
return err
Comment thread
christinaexyou marked this conversation as resolved.
}

// createGuardrailsOrchestratorWithReplicas creates a GuardrailsOrchestrator CR with the
// given config map name and replica count. It is idempotent: if the CR already exists it
// returns nil, and any non-NotFound lookup error is returned unchanged.
func createGuardrailsOrchestratorWithReplicas(ctx context.Context, orchestratorConfigMap string, replicas int32) error {
	key := types.NamespacedName{Name: orchestratorName, Namespace: namespaceName}
	getErr := k8sClient.Get(ctx, key, &gorchv1alpha1.GuardrailsOrchestrator{})
	if getErr == nil || !errors.IsNotFound(getErr) {
		// Either the CR is already present (nothing to do) or the lookup
		// failed for an unexpected reason (propagate it).
		return getErr
	}
	orchestrator := &gorchv1alpha1.GuardrailsOrchestrator{
		ObjectMeta: metav1.ObjectMeta{
			Name:      key.Name,
			Namespace: key.Namespace,
		},
		Spec: gorchv1alpha1.GuardrailsOrchestratorSpec{
			Replicas:           replicas,
			OrchestratorConfig: &orchestratorConfigMap,
		},
	}
	return k8sClient.Create(ctx, orchestrator)
}

func deleteGuardrailsOrchestrator(ctx context.Context, name string, namespace string) error {
typedNamespacedName := types.NamespacedName{Name: name, Namespace: namespace}
err := k8sClient.Get(ctx, typedNamespacedName, &gorchv1alpha1.GuardrailsOrchestrator{})
Expand Down Expand Up @@ -422,6 +440,157 @@ func testCreateDeleteGuardrailsOrchestratorOtelExporter(namespaceName string) {
})
}

// testCreateGuardrailsOrchestratorWithCustomReplicas registers a spec verifying that a
// GuardrailsOrchestrator created with Spec.Replicas=3 yields a Deployment with three
// replicas, then deletes the resources it created.
func testCreateGuardrailsOrchestratorWithCustomReplicas(namespaceName string) {
	It("Should successfully create a deployment with custom replica count", func() {
		By("Creating a custom resource for the GuardrailsOrchestrator with 3 replicas")
		ctx := context.Background()
		typedNamespacedName := types.NamespacedName{Name: orchestratorName, Namespace: namespaceName}

		orchConfig := &corev1.ConfigMap{
			TypeMeta: metav1.TypeMeta{
				Kind:       "ConfigMap",
				APIVersion: "v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:      orchestratorName + "-config",
				Namespace: namespaceName,
			},
		}
		err := k8sClient.Create(ctx, orchConfig)
		// Tolerate a configmap left behind by an earlier spec in the suite.
		if err != nil && !errors.IsAlreadyExists(err) {
			Expect(err).ToNot(HaveOccurred())
		}

		err = createGuardrailsOrchestratorWithReplicas(ctx, orchestratorName+"-config", 3)
		Expect(err).ToNot(HaveOccurred())

		By("Checking if the custom resource was successfully created")
		err = k8sClient.Get(ctx, typedNamespacedName, &gorchv1alpha1.GuardrailsOrchestrator{})
		Expect(err).ToNot(HaveOccurred())

		By("Reconciling the custom resource that was created")
		reconciler := &GuardrailsOrchestratorReconciler{
			Client:    k8sClient,
			Scheme:    k8sClient.Scheme(),
			Namespace: namespaceName,
		}

		_, err = reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: typedNamespacedName})
		Expect(err).ToNot(HaveOccurred())

		By("Checking if the deployment was created with 3 replicas")
		// Use the Gomega-argument form so a failed assertion is retried until the
		// timeout instead of aborting the spec on the first poll; it also avoids
		// assigning to the enclosing err from inside the polled closure.
		Eventually(func(g Gomega) {
			deployment := &appsv1.Deployment{}
			g.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: orchestratorName, Namespace: namespaceName}, deployment)).To(Succeed())
			g.Expect(*deployment.Spec.Replicas).Should(Equal(int32(3)))
			g.Expect(deployment.Namespace).Should(Equal(namespaceName))
			g.Expect(deployment.Name).Should(Equal(orchestratorName))
		}, time.Second*10, time.Millisecond*10).Should(Succeed())

		By("Deleting the custom resource for the GuardrailsOrchestrator")
		err = deleteGuardrailsOrchestrator(ctx, orchestratorName, namespaceName)
		Expect(err).ToNot(HaveOccurred())

		By("Deleting the orchestrator configmap")
		err = k8sClient.Delete(ctx, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: orchestratorName + "-config", Namespace: namespaceName}})
		Expect(err).ToNot(HaveOccurred())

		By("Reconciling the custom resource that was deleted")
		_, err = reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: typedNamespacedName})
		Expect(err).ToNot(HaveOccurred())
	})
}

// testCreateGuardrailsOrchestratorWithScaledUpReplicas registers a spec verifying that
// updating a GuardrailsOrchestrator from 1 to 3 replicas scales the existing Deployment
// on the next reconcile, then deletes the resources it created.
func testCreateGuardrailsOrchestratorWithScaledUpReplicas(namespaceName string) {
	It("Should successfully scale up the deployment from 1 to 3 replicas", func() {
		By("Creating a custom resource for the GuardrailsOrchestrator with 1 replica")
		ctx := context.Background()
		typedNamespacedName := types.NamespacedName{Name: orchestratorName, Namespace: namespaceName}

		orchConfig := &corev1.ConfigMap{
			TypeMeta: metav1.TypeMeta{
				Kind:       "ConfigMap",
				APIVersion: "v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:      orchestratorName + "-config",
				Namespace: namespaceName,
			},
		}
		err := k8sClient.Create(ctx, orchConfig)
		// Tolerate a configmap left behind by an earlier spec in the suite.
		if err != nil && !errors.IsAlreadyExists(err) {
			Expect(err).ToNot(HaveOccurred())
		}

		err = createGuardrailsOrchestratorWithReplicas(ctx, orchestratorName+"-config", 1)
		Expect(err).ToNot(HaveOccurred())

		By("Checking if the custom resource was successfully created")
		err = k8sClient.Get(ctx, typedNamespacedName, &gorchv1alpha1.GuardrailsOrchestrator{})
		Expect(err).ToNot(HaveOccurred())

		By("Reconciling the custom resource that was created")
		reconciler := &GuardrailsOrchestratorReconciler{
			Client:    k8sClient,
			Scheme:    k8sClient.Scheme(),
			Namespace: namespaceName,
		}

		_, err = reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: typedNamespacedName})
		Expect(err).ToNot(HaveOccurred())

		By("Checking if the deployment was created with 1 replica")
		// Gomega-argument form: failed assertions are retried until the timeout
		// instead of aborting the spec on the first poll.
		Eventually(func(g Gomega) {
			deployment := &appsv1.Deployment{}
			g.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: orchestratorName, Namespace: namespaceName}, deployment)).To(Succeed())
			g.Expect(*deployment.Spec.Replicas).Should(Equal(int32(1)))
		}, time.Second*10, time.Millisecond*10).Should(Succeed())

		By("Updating the GuardrailsOrchestrator to 3 replicas")
		orchestrator := &gorchv1alpha1.GuardrailsOrchestrator{}
		err = k8sClient.Get(ctx, typedNamespacedName, orchestrator)
		Expect(err).ToNot(HaveOccurred())

		orchestrator.Spec.Replicas = 3
		err = k8sClient.Update(ctx, orchestrator)
		Expect(err).ToNot(HaveOccurred())

		By("Reconciling the updated custom resource")
		_, err = reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: typedNamespacedName})
		Expect(err).ToNot(HaveOccurred())

		By("Checking if the deployment was scaled up to 3 replicas")
		Eventually(func(g Gomega) {
			deployment := &appsv1.Deployment{}
			g.Expect(k8sClient.Get(ctx, types.NamespacedName{Name: orchestratorName, Namespace: namespaceName}, deployment)).To(Succeed())
			g.Expect(*deployment.Spec.Replicas).Should(Equal(int32(3)))
			g.Expect(deployment.Namespace).Should(Equal(namespaceName))
			g.Expect(deployment.Name).Should(Equal(orchestratorName))
		}, time.Second*10, time.Millisecond*10).Should(Succeed())

		By("Deleting the custom resource for the GuardrailsOrchestrator")
		err = deleteGuardrailsOrchestrator(ctx, orchestratorName, namespaceName)
		Expect(err).ToNot(HaveOccurred())

		By("Deleting the orchestrator configmap")
		err = k8sClient.Delete(ctx, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: orchestratorName + "-config", Namespace: namespaceName}})
		Expect(err).ToNot(HaveOccurred())

		By("Reconciling the custom resource that was deleted")
		_, err = reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: typedNamespacedName})
		Expect(err).ToNot(HaveOccurred())
	})
}

func testCreateTwoGuardrailsOrchestratorsInSameNamespace(namespaceName string) {
It("Should successfully reconcile two custom resources for the GuardrailsOrchestrator in the same namespace", func() {
By("Creating the first custom resource for the GuardrailsOrchestrator")
Expand Down Expand Up @@ -813,6 +982,8 @@ var _ = Describe("GuardrailsOrchestrator Controller", func() {
testCreateDeleteGuardrailsOrchestrator(namespaceName)
testCreateDeleteGuardrailsOrchestratorSidecar(namespaceName)
testCreateDeleteGuardrailsOrchestratorOtelExporter(namespaceName)
testCreateGuardrailsOrchestratorWithCustomReplicas(namespaceName)
testCreateGuardrailsOrchestratorWithScaledUpReplicas(namespaceName)
testCreateTwoGuardrailsOrchestratorsInSameNamespace(namespaceName)
testCreateTwoGuardrailsOrchestratorsInDifferentNamespaces(namespaceName, secondNamespaceName)
})
Expand Down
2 changes: 1 addition & 1 deletion controllers/gorch/templates/deployment.tmpl.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ metadata:
app.kubernetes.io/name: {{.Orchestrator.Name}}
app.kubernetes.io/part-of: trustyai
spec:
replicas: 1
replicas: {{.Orchestrator.Spec.Replicas}}
selector:
matchLabels:
app: {{.Orchestrator.Name}}
Expand Down
Loading