diff --git a/test/extended/node/CLAUDE.md b/test/extended/node/CLAUDE.md new file mode 100644 index 000000000000..8567ddfcb08e --- /dev/null +++ b/test/extended/node/CLAUDE.md @@ -0,0 +1,50 @@ +# OpenShift Node E2E Tests - Tribal Knowledge + +## Core Principle + +**ALWAYS use the utility functions in `node_utils.go`** instead of implementing your own. Read that file to discover available helpers. + +## Key Functions & Context + +### Node Selection + +- **GetNodesByLabel** - Use this for getting a subset of the nodes. The labels must be carefully chosen. +- **GetControlPlaneNodes** - These are the master nodes or the control plane nodes. In most clusters it will return 3 of them. +- **GetPureWorkerNodes** - Use this to make sure that the node returned is not a control plane node. + +### Node Command Execution + +- **ExecOnNodeWithChroot** - Use this for all the root command executions inside a debug container. This can change the state of the node. Use it with caution. + +### Kubelet Configuration & Lifecycle + +- **GetKubeletConfigFromNode** - Use this to check if a kubelet configuration made at the API level has been applied to the node. - **CleanupDropInAndRestartKubelet** - Kubelet supports a drop-in directory. If you manually drop in a config, use this to clean it up. +- **IsNodeInReadyState** - Use this to find out if the node has completed its restart and is back in a ready state. +- **WaitForNodeToBeReady** - If any kubelet config is applied, use this to wait for the node to reach a ready state. +- **RestartKubeletOnNode** - Use this when testing kubelet restarts, and also in cases where there are issues outside the test's context. + +### MachineConfig Operations + +- **WaitForMCP** - Use this after creating a new machine config to wait for it to be applied. Rollouts run in parallel, but if multiple nodes are involved it can take more time. + +## Common Mistakes to Avoid + +1. 
**Don't manually construct `oc debug` commands** - use `ExecOnNodeWithChroot()` or `ExecOnNodeWithNsenter()` + +2. **Don't forget to handle SNO clusters** - use `GetPureWorkerNodes()` to filter out nodes with dual roles + +3. **Don't skip context propagation** - always pass `ctx` to utility functions + +4. **Don't forget cleanup** - use `defer` or `g.AfterEach` with `CleanupDropInAndRestartKubelet()` + +5. **Don't ignore MCP rollouts** - after MachineConfig changes, use `WaitForMCP()` to ensure stability + +6. **Don't assume swap operations work with chroot** - use `ExecOnNodeWithNsenter()` for swap commands + +## Getting Help + +- Read the function documentation in `node_utils.go` +- Look at existing tests in this directory for patterns +- Check testdata files in `testdata/node/` for config examples +- See `node_swap_cnv.go` for a complete example diff --git a/test/extended/node/claude-docs-check.sh b/test/extended/node/claude-docs-check.sh new file mode 100755 index 000000000000..b91acf7f049f --- /dev/null +++ b/test/extended/node/claude-docs-check.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# Check if node_utils.go has functions not mentioned in CLAUDE.md + +set -e + +NODE_UTILS="test/extended/node/node_utils.go" +CLAUDE_MD="test/extended/node/CLAUDE.md" + +# Extract ONLY exported function names from node_utils.go (start with uppercase) +# Lowercase (unexported) helpers are intentionally not documented in CLAUDE.md +# Matches both standalone functions and receiver methods, including digits in names +UTILS_FUNCS=$( + grep -E '^[[:space:]]*func([[:space:]]+\([^)]*\))?[[:space:]]+[A-Z][A-Za-z0-9_]*[[:space:]]*\(' "$NODE_UTILS" \ + | sed -E 's/^[[:space:]]*func([[:space:]]+\([^)]*\))?[[:space:]]+([A-Z][A-Za-z0-9_]*)[[:space:]]*\(.*/\2/' \ + | sort -u +) + +# Read CLAUDE.md once for efficiency +CLAUDE_CONTENT=$(cat "$CLAUDE_MD") + +# Check each function is mentioned in CLAUDE.md (word-boundary match to avoid false positives) +MISSING=() +for func in $UTILS_FUNCS; do + if ! 
echo "$CLAUDE_CONTENT" | grep -Fqw "$func"; then + MISSING+=(" - $func()") + fi +done + +if [ ${#MISSING[@]} -gt 0 ]; then + echo "⚠️ Warning: node_utils.go functions not documented in CLAUDE.md:" + printf '%s\n' "${MISSING[@]}" + echo "" + echo "Please update CLAUDE.md to document these utility functions." + exit 1 +fi + +echo "✅ All node_utils.go functions are documented in CLAUDE.md" diff --git a/test/extended/node/node_sizing.go b/test/extended/node/node_sizing.go index bfba9942473d..977a631198d3 100644 --- a/test/extended/node/node_sizing.go +++ b/test/extended/node/node_sizing.go @@ -151,7 +151,7 @@ var _ = g.Describe("[Suite:openshift/disruptive-longrunning][sig-node][Disruptiv g.DeferCleanup(cleanupMCP) g.By("Waiting for custom MachineConfigPool to be ready") - err = waitForMCP(ctx, mcClient, testMCPName, 5*time.Minute) + err = WaitForMCP(ctx, mcClient, testMCPName, 5*time.Minute) o.Expect(err).NotTo(o.HaveOccurred(), "Custom MachineConfigPool should become ready") verifyNodeSizingEnabledFile(oc, nodeName, "true") @@ -193,7 +193,7 @@ var _ = g.Describe("[Suite:openshift/disruptive-longrunning][sig-node][Disruptiv // Wait for custom MCP to be ready after cleanup g.By("Waiting for custom MCP to be ready after KubeletConfig deletion") - waitErr := waitForMCP(cleanupCtx, mcClient, testMCPName, 5*time.Minute) + waitErr := WaitForMCP(cleanupCtx, mcClient, testMCPName, 5*time.Minute) if apierrors.IsNotFound(waitErr) { // MachineConfigPool already deleted, nothing to wait for } else if waitErr != nil { @@ -229,7 +229,7 @@ var _ = g.Describe("[Suite:openshift/disruptive-longrunning][sig-node][Disruptiv }, 2*time.Minute, 10*time.Second).Should(o.BeTrue(), fmt.Sprintf("%s MCP should start updating", testMCPName)) g.By(fmt.Sprintf("Waiting for %s MCP to be ready with new configuration", testMCPName)) - err = waitForMCP(ctx, mcClient, testMCPName, 15*time.Minute) + err = WaitForMCP(ctx, mcClient, testMCPName, 15*time.Minute) o.Expect(err).NotTo(o.HaveOccurred(), 
fmt.Sprintf("%s MCP should become ready with new configuration", testMCPName)) verifyNodeSizingEnabledFile(oc, nodeName, "false") diff --git a/test/extended/node/node_swap.go b/test/extended/node/node_swap.go index f3f0c151c0ce..4daa73bda283 100644 --- a/test/extended/node/node_swap.go +++ b/test/extended/node/node_swap.go @@ -45,16 +45,16 @@ var _ = g.Describe("[Jira:Node][sig-node] Node non-cnv swap configuration", func // the kubelet will not use it for memory management, maintaining consistent behavior across the cluster. g.It("should have correct default kubelet swap settings with worker nodes failSwapOn=false, control plane nodes failSwapOn=true, and both swapBehavior=NoSwap [OCP-86394]", ote.Informing(), func(ctx context.Context) { g.By("Getting worker nodes") - allWorkerNodes, err := getNodesByLabel(ctx, oc, "node-role.kubernetes.io/worker") + allWorkerNodes, err := GetNodesByLabel(ctx, oc, "node-role.kubernetes.io/worker") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(allWorkerNodes)).Should(o.BeNumerically(">", 0), "Expected at least one worker node") // Filter out nodes that are also control plane (e.g., SNO) - workerNodes := getPureWorkerNodes(allWorkerNodes) + workerNodes := GetPureWorkerNodes(allWorkerNodes) g.By("Validating kubelet configuration on each worker node") for _, node := range workerNodes { - config, err := getKubeletConfigFromNode(ctx, oc, node.Name) + config, err := GetKubeletConfigFromNode(ctx, oc, node.Name) o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get kubelet config for worker node %s", node.Name) g.By(fmt.Sprintf("Checking failSwapOn=false on worker node %s", node.Name)) @@ -74,13 +74,13 @@ var _ = g.Describe("[Jira:Node][sig-node] Node non-cnv swap configuration", func if *controlPlaneTopology != configv1.ExternalTopologyMode { g.By("Getting control plane nodes") - controlPlaneNodes, err := getControlPlaneNodes(ctx, oc) + controlPlaneNodes, err := GetControlPlaneNodes(ctx, oc) o.Expect(err).NotTo(o.HaveOccurred()) 
o.Expect(len(controlPlaneNodes)).Should(o.BeNumerically(">", 0), "Expected at least one control plane node") g.By("Validating kubelet configuration on each control plane node") for _, node := range controlPlaneNodes { - config, err := getKubeletConfigFromNode(ctx, oc, node.Name) + config, err := GetKubeletConfigFromNode(ctx, oc, node.Name) o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get kubelet config for control plane node %s", node.Name) g.By(fmt.Sprintf("Checking failSwapOn=true on control plane node %s", node.Name)) @@ -113,7 +113,7 @@ var _ = g.Describe("[Jira:Node][sig-node] Node non-cnv swap configuration", func g.By("Getting initial machine config resourceVersion") // Get the initial resourceVersion of the worker machine config before creating KubeletConfig - workerGeneratedKubeletMC, err := getWorkerGeneratedKubeletMC(ctx, mcClient) + workerGeneratedKubeletMC, err := GetWorkerGeneratedKubeletMC(ctx, mcClient) o.Expect(err).NotTo(o.HaveOccurred(), "Failed to find worker-generated-kubelet MachineConfig") initialResourceVersion := workerGeneratedKubeletMC.ResourceVersion framework.Logf("Initial %s resourceVersion: %s", workerGeneratedKubeletMC.Name, initialResourceVersion) @@ -183,21 +183,21 @@ var _ = g.Describe("[Jira:Node][sig-node] Node non-cnv swap configuration", func time.Sleep(5 * time.Second) // Check if the machine config was created or updated (compare to initial resourceVersion captured earlier) - workerMCAfter, err := getWorkerGeneratedKubeletMC(ctx, mcClient) + workerMCAfter, err := GetWorkerGeneratedKubeletMC(ctx, mcClient) o.Expect(err).NotTo(o.HaveOccurred(), "Failed to find worker-generated-kubelet MachineConfig for verification") o.Expect(workerMCAfter.ResourceVersion).To(o.Equal(initialResourceVersion), "Machine config %s should not be updated when failSwapOn is rejected", workerMCAfter.Name) framework.Logf("Verified: %s was not updated (resourceVersion: %s)", workerMCAfter.Name, workerMCAfter.ResourceVersion) g.By("Verifying worker 
nodes still have correct swap settings") - allWorkerNodes, err := getNodesByLabel(ctx, oc, "node-role.kubernetes.io/worker") + allWorkerNodes, err := GetNodesByLabel(ctx, oc, "node-role.kubernetes.io/worker") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(allWorkerNodes)).Should(o.BeNumerically(">", 0), "Expected at least one worker node") // Filter out nodes that are also control plane (e.g., SNO) - workerNodes := getPureWorkerNodes(allWorkerNodes) + workerNodes := GetPureWorkerNodes(allWorkerNodes) for _, node := range workerNodes { - config, err := getKubeletConfigFromNode(ctx, oc, node.Name) + config, err := GetKubeletConfigFromNode(ctx, oc, node.Name) o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get kubelet config for worker node %s", node.Name) g.By(fmt.Sprintf("Verifying failSwapOn=false remains unchanged on worker node %s", node.Name)) diff --git a/test/extended/node/node_swap_cnv.go b/test/extended/node/node_swap_cnv.go index 9290b863a62b..dd55f50c9d37 100644 --- a/test/extended/node/node_swap_cnv.go +++ b/test/extended/node/node_swap_cnv.go @@ -59,9 +59,9 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr } // Check if CNV is already installed - if !isCNVInstalled(ctx, oc) { + if !IsCNVInstalled(ctx, oc) { framework.Logf("CNV operator not installed, installing...") - err := installCNVOperator(ctx, oc) + err := InstallCNVOperator(ctx, oc) if err != nil { framework.Logf("Failed to install CNV operator: %v", err) e2eskipper.Skipf("Failed to install CNV operator: %v", err) @@ -73,7 +73,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr } // Ensure drop-in directory exists on all worker nodes - err = ensureDropInDirectoryExists(ctx, oc, cnvDropInDir) + err = EnsureDropInDirectoryExists(ctx, oc, cnvDropInDir) if err != nil { framework.Logf("Warning: failed to ensure drop-in directory exists: %v", err) } @@ -84,7 +84,7 @@ var _ = 
g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr // Uninstall CNV operator if we installed it if cnvInstalledByTest { framework.Logf("Uninstalling CNV operator...") - err := uninstallCNVOperator(ctx, oc) + err := UninstallCNVOperator(ctx, oc) if err != nil { framework.Logf("Warning: failed to uninstall CNV operator: %v", err) } @@ -94,17 +94,17 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr // TC1: Verify silent creation and ownership of drop-in directory g.It("TC1: should verify silent creation and ownership of drop-in directory on CNV nodes", func(ctx context.Context) { // Get a CNV worker node for tests - cnvWorkerNode = getCNVWorkerNodeName(ctx, oc) + cnvWorkerNode = GetCNVWorkerNodeName(ctx, oc) o.Expect(cnvWorkerNode).NotTo(o.BeEmpty(), "No CNV worker nodes available") framework.Logf("Using CNV worker node for tests: %s", cnvWorkerNode) g.By("Getting worker nodes") - allWorkerNodes, err := getNodesByLabel(ctx, oc, "node-role.kubernetes.io/worker") + allWorkerNodes, err := GetNodesByLabel(ctx, oc, "node-role.kubernetes.io/worker") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(allWorkerNodes)).Should(o.BeNumerically(">", 0), "Expected at least one worker node") // Filter out nodes that are also control plane (e.g., SNO) - workerNodes := getPureWorkerNodes(allWorkerNodes) + workerNodes := GetPureWorkerNodes(allWorkerNodes) var workerNodeNames []string for _, node := range workerNodes { @@ -159,7 +159,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr if *controlPlaneTopology != configv1.ExternalTopologyMode { g.By("Verifying drop-in directory does NOT exist on control plane/master nodes") - controlPlaneNodes, err := getNodesByLabel(ctx, oc, "node-role.kubernetes.io/master") + controlPlaneNodes, err := GetNodesByLabel(ctx, oc, "node-role.kubernetes.io/master") o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Found %d control plane/master nodes", 
len(controlPlaneNodes)) @@ -183,7 +183,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.It("TC2: should verify kubelet starts normally with empty directory", func(ctx context.Context) { skipOnSingleNodeTopology(oc) //skip this test for SNO // Get a CNV worker node for tests - cnvWorkerNode = getCNVWorkerNodeName(ctx, oc) + cnvWorkerNode = GetCNVWorkerNodeName(ctx, oc) o.Expect(cnvWorkerNode).NotTo(o.BeEmpty(), "No CNV worker nodes available") framework.Logf("Using CNV worker node for tests: %s", cnvWorkerNode) @@ -202,7 +202,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.By("Verifying node is Ready") node, err := oc.AdminKubeClient().CoreV1().Nodes().Get(ctx, cnvWorkerNode, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(isNodeInReadyState(node)).To(o.BeTrue(), "Node should be in Ready state") + o.Expect(IsNodeInReadyState(node)).To(o.BeTrue(), "Node should be in Ready state") framework.Logf("TC2 PASSED: Kubelet starts normally with empty/missing directory") }) @@ -211,7 +211,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.It("TC3: should apply LimitedSwap configuration from drop-in file", func(ctx context.Context) { skipOnSingleNodeTopology(oc) //skip this test for SNO // Get a CNV worker node for tests - cnvWorkerNode = getCNVWorkerNodeName(ctx, oc) + cnvWorkerNode = GetCNVWorkerNodeName(ctx, oc) o.Expect(cnvWorkerNode).NotTo(o.BeEmpty(), "No CNV worker nodes available") framework.Logf("=== TC3: Testing LimitedSwap configuration via drop-in file ===") @@ -221,16 +221,16 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr framework.Logf("Full path: %s", cnvDropInFilePath) g.By("Getting kubelet config BEFORE applying drop-in file") - configBefore, err := getKubeletConfigFromNode(ctx, oc, cnvWorkerNode) + configBefore, err := GetKubeletConfigFromNode(ctx, oc, cnvWorkerNode) 
o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Kubelet swapBehavior BEFORE: '%s'", configBefore.MemorySwap.SwapBehavior) // If LimitedSwap is already enabled, clean up first to start from NoSwap state if configBefore.MemorySwap.SwapBehavior == "LimitedSwap" { g.By("LimitedSwap already enabled - cleaning up to start from NoSwap state") - cleanupDropInAndRestartKubelet(ctx, oc, cnvWorkerNode, cnvDropInFilePath) + CleanupDropInAndRestartKubelet(ctx, oc, cnvWorkerNode, cnvDropInFilePath) - configBefore, err = getKubeletConfigFromNode(ctx, oc, cnvWorkerNode) + configBefore, err = GetKubeletConfigFromNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Kubelet swapBehavior BEFORE (after cleanup): '%s'", configBefore.MemorySwap.SwapBehavior) o.Expect(configBefore.MemorySwap.SwapBehavior).To(o.Or(o.BeEmpty(), o.Equal("NoSwap")), @@ -238,8 +238,8 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr } g.By("Creating drop-in file with LimitedSwap configuration in /etc/openshift/kubelet.conf.d/") - framework.Logf("Creating file: %s with content:\n%s", cnvDropInFilePath, loadConfigFromFile(cnvLimitedSwapConfigPath)) - err = createDropInFile(oc, cnvWorkerNode, cnvDropInFilePath, loadConfigFromFile(cnvLimitedSwapConfigPath)) + framework.Logf("Creating file: %s with content:\n%s", cnvDropInFilePath, LoadConfigFromFile(cnvLimitedSwapConfigPath)) + err = CreateDropInFile(oc, cnvWorkerNode, cnvDropInFilePath, LoadConfigFromFile(cnvLimitedSwapConfigPath)) o.Expect(err).NotTo(o.HaveOccurred()) g.By("Verifying drop-in file was created successfully") @@ -251,17 +251,17 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr // Defer cleanup defer func() { g.By("Cleaning up - removing drop-in file and restarting kubelet") - cleanupDropInAndRestartKubelet(ctx, oc, cnvWorkerNode, cnvDropInFilePath) + CleanupDropInAndRestartKubelet(ctx, oc, cnvWorkerNode, cnvDropInFilePath) }() g.By("Restarting 
kubelet to load the new configuration") - err = restartKubeletOnNode(ctx, oc, cnvWorkerNode) + err = RestartKubeletOnNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) g.By("Waiting for node to be ready after kubelet restart") - waitForNodeToBeReady(ctx, oc, cnvWorkerNode) + WaitForNodeToBeReady(ctx, oc, cnvWorkerNode) - configAfter, err := getKubeletConfigFromNode(ctx, oc, cnvWorkerNode) + configAfter, err := GetKubeletConfigFromNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Kubelet swapBehavior AFTER: '%s'", configAfter.MemorySwap.SwapBehavior) o.Expect(configAfter.MemorySwap.SwapBehavior).To(o.Equal("LimitedSwap"), @@ -276,14 +276,14 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.It("TC4: should revert to NoSwap when drop-in file is removed", func(ctx context.Context) { skipOnSingleNodeTopology(oc) //skip this test for SNO // Get a CNV worker node for tests - cnvWorkerNode = getCNVWorkerNodeName(ctx, oc) + cnvWorkerNode = GetCNVWorkerNodeName(ctx, oc) o.Expect(cnvWorkerNode).NotTo(o.BeEmpty(), "No CNV worker nodes available") framework.Logf("=== TC4: Testing revert to NoSwap when drop-in file is removed ===") framework.Logf("Executing on node: %s", cnvWorkerNode) g.By("Getting initial kubelet config") - configInitial, err := getKubeletConfigFromNode(ctx, oc, cnvWorkerNode) + configInitial, err := GetKubeletConfigFromNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Initial swapBehavior: '%s'", configInitial.MemorySwap.SwapBehavior) @@ -291,16 +291,16 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr if configInitial.MemorySwap.SwapBehavior != "LimitedSwap" { g.By("Creating drop-in file with LimitedSwap configuration") framework.Logf("Creating file: %s", cnvDropInFilePath) - err = createDropInFile(oc, cnvWorkerNode, cnvDropInFilePath, loadConfigFromFile(cnvLimitedSwapConfigPath)) + err = CreateDropInFile(oc, 
cnvWorkerNode, cnvDropInFilePath, LoadConfigFromFile(cnvLimitedSwapConfigPath)) o.Expect(err).NotTo(o.HaveOccurred()) g.By("Restarting kubelet to apply LimitedSwap") - err = restartKubeletOnNode(ctx, oc, cnvWorkerNode) + err = RestartKubeletOnNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) - waitForNodeToBeReady(ctx, oc, cnvWorkerNode) + WaitForNodeToBeReady(ctx, oc, cnvWorkerNode) g.By("Verifying LimitedSwap is applied") - configWithSwap, err := getKubeletConfigFromNode(ctx, oc, cnvWorkerNode) + configWithSwap, err := GetKubeletConfigFromNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("swapBehavior AFTER applying drop-in: '%s'", configWithSwap.MemorySwap.SwapBehavior) o.Expect(configWithSwap.MemorySwap.SwapBehavior).To(o.Equal("LimitedSwap")) @@ -309,10 +309,10 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr } g.By("Removing drop-in file and restarting kubelet") - cleanupDropInAndRestartKubelet(ctx, oc, cnvWorkerNode, cnvDropInFilePath) + CleanupDropInAndRestartKubelet(ctx, oc, cnvWorkerNode, cnvDropInFilePath) g.By("Verifying swapBehavior reverts to NoSwap") - configAfterRemoval, err := getKubeletConfigFromNode(ctx, oc, cnvWorkerNode) + configAfterRemoval, err := GetKubeletConfigFromNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("swapBehavior AFTER removing drop-in: '%s'", configAfterRemoval.MemorySwap.SwapBehavior) o.Expect(configAfterRemoval.MemorySwap.SwapBehavior).To(o.Or(o.BeEmpty(), o.Equal("NoSwap")), @@ -331,7 +331,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr } // Get all control plane nodes - controlPlaneNodes, err := getControlPlaneNodes(ctx, oc) + controlPlaneNodes, err := GetControlPlaneNodes(ctx, oc) o.Expect(err).NotTo(o.HaveOccurred()) if len(controlPlaneNodes) == 0 { e2eskipper.Skipf("No control plane nodes available") @@ -343,7 +343,7 @@ var _ = 
g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr framework.Logf("--- Testing control plane node %d/%d: %s ---", i+1, len(controlPlaneNodes), cpNodeName) g.By(fmt.Sprintf("Getting kubelet config BEFORE placing drop-in file on %s", cpNodeName)) - configBefore, err := getKubeletConfigFromNode(ctx, oc, cpNodeName) + configBefore, err := GetKubeletConfigFromNode(ctx, oc, cpNodeName) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Control plane %s swapBehavior BEFORE: '%s'", cpNodeName, configBefore.MemorySwap.SwapBehavior) @@ -351,17 +351,17 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr _, _ = ExecOnNodeWithChroot(oc, cpNodeName, "mkdir", "-p", cnvDropInDir) g.By(fmt.Sprintf("Creating drop-in file on %s", cpNodeName)) - err = createDropInFile(oc, cpNodeName, cnvDropInFilePath, loadConfigFromFile(cnvLimitedSwapConfigPath)) + err = CreateDropInFile(oc, cpNodeName, cnvDropInFilePath, LoadConfigFromFile(cnvLimitedSwapConfigPath)) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Created drop-in file: %s on %s", cnvDropInFilePath, cpNodeName) g.By(fmt.Sprintf("Restarting kubelet on %s", cpNodeName)) - err = restartKubeletOnNode(ctx, oc, cpNodeName) + err = RestartKubeletOnNode(ctx, oc, cpNodeName) o.Expect(err).NotTo(o.HaveOccurred()) - waitForNodeToBeReady(ctx, oc, cpNodeName) + WaitForNodeToBeReady(ctx, oc, cpNodeName) g.By(fmt.Sprintf("Verifying %s did NOT apply LimitedSwap from drop-in", cpNodeName)) - configAfter, err := getKubeletConfigFromNode(ctx, oc, cpNodeName) + configAfter, err := GetKubeletConfigFromNode(ctx, oc, cpNodeName) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Control plane %s swapBehavior AFTER: '%s'", cpNodeName, configAfter.MemorySwap.SwapBehavior) @@ -373,7 +373,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr cpNodeName, configBefore.MemorySwap.SwapBehavior, configAfter.MemorySwap.SwapBehavior) g.By(fmt.Sprintf("Cleaning up %s", 
cpNodeName)) - removeDropInFile(oc, cpNodeName, cnvDropInFilePath) + RemoveDropInFile(oc, cpNodeName, cnvDropInFilePath) // Also remove the drop-in directory we created on control plane _, _ = ExecOnNodeWithChroot(oc, cpNodeName, "rmdir", cnvDropInDir) framework.Logf("Removed drop-in directory from control plane node %s", cpNodeName) @@ -387,7 +387,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.It("TC6: should verify drop-in directory is auto-recreated after deletion", func(ctx context.Context) { skipOnSingleNodeTopology(oc) //skip this test for SNO // Get a CNV worker node for tests - cnvWorkerNode = getCNVWorkerNodeName(ctx, oc) + cnvWorkerNode = GetCNVWorkerNodeName(ctx, oc) o.Expect(cnvWorkerNode).NotTo(o.BeEmpty(), "No CNV worker nodes available") framework.Logf("=== TC6: Testing drop-in directory auto-recreation ===") @@ -413,11 +413,11 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr framework.Logf("Confirmed: Directory does not exist after deletion") g.By("Restarting kubelet") - err = restartKubeletOnNode(ctx, oc, cnvWorkerNode) + err = RestartKubeletOnNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) g.By("Waiting for node to be ready") - waitForNodeToBeReady(ctx, oc, cnvWorkerNode) + WaitForNodeToBeReady(ctx, oc, cnvWorkerNode) g.By("Verifying directory was auto-recreated") output, err = ExecOnNodeWithChroot(oc, cnvWorkerNode, "ls", "-la", cnvDropInDir) @@ -437,7 +437,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.It("TC7: should validate security and permissions of drop-in directory", func(ctx context.Context) { skipOnSingleNodeTopology(oc) //skip this test for SNO // Get a CNV worker node for tests - cnvWorkerNode = getCNVWorkerNodeName(ctx, oc) + cnvWorkerNode = GetCNVWorkerNodeName(ctx, oc) o.Expect(cnvWorkerNode).NotTo(o.BeEmpty(), "No CNV worker nodes available") framework.Logf("=== TC7: Testing security and 
permissions of drop-in directory ===") @@ -476,11 +476,11 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.By("Creating a test config file with correct permissions") testFile := cnvDropInDir + "/test-permissions.conf" framework.Logf("Creating test file: %s", testFile) - framework.Logf("File content:\n%s", loadConfigFromFile(cnvLimitedSwapConfigPath)) - err = createDropInFile(oc, cnvWorkerNode, testFile, loadConfigFromFile(cnvLimitedSwapConfigPath)) + framework.Logf("File content:\n%s", LoadConfigFromFile(cnvLimitedSwapConfigPath)) + err = CreateDropInFile(oc, cnvWorkerNode, testFile, LoadConfigFromFile(cnvLimitedSwapConfigPath)) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Test file created successfully") - defer removeDropInFile(oc, cnvWorkerNode, testFile) + defer RemoveDropInFile(oc, cnvWorkerNode, testFile) g.By("Verifying config file ownership") framework.Logf("Running: stat -c %%U:%%G %s", testFile) @@ -511,7 +511,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.It("TC8: should verify cluster stability with LimitedSwap enabled", func(ctx context.Context) { skipOnSingleNodeTopology(oc) //skip this test for SNO // Get a CNV worker node for tests - cnvWorkerNode = getCNVWorkerNodeName(ctx, oc) + cnvWorkerNode = GetCNVWorkerNodeName(ctx, oc) o.Expect(cnvWorkerNode).NotTo(o.BeEmpty(), "No CNV worker nodes available") framework.Logf("=== TC8: Testing cluster stability with LimitedSwap enabled ===") @@ -519,8 +519,8 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.By("Creating LimitedSwap configuration") framework.Logf("Creating drop-in file: %s", cnvDropInFilePath) - framework.Logf("Drop-in file content:\n%s", loadConfigFromFile(cnvLimitedSwapConfigPath)) - err := createDropInFile(oc, cnvWorkerNode, cnvDropInFilePath, loadConfigFromFile(cnvLimitedSwapConfigPath)) + framework.Logf("Drop-in file content:\n%s", 
LoadConfigFromFile(cnvLimitedSwapConfigPath)) + err := CreateDropInFile(oc, cnvWorkerNode, cnvDropInFilePath, LoadConfigFromFile(cnvLimitedSwapConfigPath)) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Drop-in file created successfully") @@ -531,20 +531,20 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr defer func() { g.By("Cleaning up") - cleanupDropInAndRestartKubelet(ctx, oc, cnvWorkerNode, cnvDropInFilePath) + CleanupDropInAndRestartKubelet(ctx, oc, cnvWorkerNode, cnvDropInFilePath) }() g.By("Restarting kubelet") framework.Logf("Running: systemctl restart kubelet on node %s", cnvWorkerNode) - err = restartKubeletOnNode(ctx, oc, cnvWorkerNode) + err = RestartKubeletOnNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Kubelet restart initiated, waiting for node to be ready...") - waitForNodeToBeReady(ctx, oc, cnvWorkerNode) + WaitForNodeToBeReady(ctx, oc, cnvWorkerNode) framework.Logf("Node %s is Ready", cnvWorkerNode) g.By("Verifying kubelet loaded LimitedSwap configuration") framework.Logf("Running: oc get --raw \"/api/v1/nodes/%s/proxy/configz\" | jq '.kubeletconfig.memorySwap'", cnvWorkerNode) - config, err := getKubeletConfigFromNode(ctx, oc, cnvWorkerNode) + config, err := GetKubeletConfigFromNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Kubelet config memorySwap.swapBehavior: '%s'", config.MemorySwap.SwapBehavior) o.Expect(config.MemorySwap.SwapBehavior).To(o.Equal("LimitedSwap")) @@ -563,7 +563,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr framework.Logf("Node condition: Type=%s, Status=%s, Reason=%s", condition.Type, condition.Status, condition.Reason) } } - o.Expect(isNodeInReadyState(node)).To(o.BeTrue(), "Node should remain Ready after monitoring") + o.Expect(IsNodeInReadyState(node)).To(o.BeTrue(), "Node should remain Ready after monitoring") framework.Logf("Node %s is in Ready state after 30 
seconds", cnvWorkerNode) g.By("Checking for memory pressure conditions") @@ -597,7 +597,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr framework.Logf("=== TC9: Testing non-CNV workers have no swap configuration ===") // Get a CNV worker node and temporarily remove its CNV label - cnvWorkerNode = getCNVWorkerNodeName(ctx, oc) + cnvWorkerNode = GetCNVWorkerNodeName(ctx, oc) o.Expect(cnvWorkerNode).NotTo(o.BeEmpty(), "No CNV worker nodes available") cnvLabel := "kubevirt.io/schedulable" @@ -650,7 +650,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.By("Verifying kubelet swapBehavior is default (NoSwap)") framework.Logf("Running: oc get --raw \"/api/v1/nodes/%s/proxy/configz\" | jq '.kubeletconfig.memorySwap'", nonCNVWorkerNode) - config, err := getKubeletConfigFromNode(ctx, oc, nonCNVWorkerNode) + config, err := GetKubeletConfigFromNode(ctx, oc, nonCNVWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Kubelet swapBehavior: '%s'", config.MemorySwap.SwapBehavior) // Accept either empty string or "NoSwap" as valid NoSwap state @@ -668,7 +668,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.It("TC10: should apply correct precedence with multiple files", func(ctx context.Context) { skipOnSingleNodeTopology(oc) //skip this test for SNO // Get a CNV worker node for tests - cnvWorkerNode = getCNVWorkerNodeName(ctx, oc) + cnvWorkerNode = GetCNVWorkerNodeName(ctx, oc) o.Expect(cnvWorkerNode).NotTo(o.BeEmpty(), "No CNV worker nodes available") framework.Logf("=== TC10: Testing file precedence with multiple drop-in files ===") @@ -680,15 +680,15 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.By("Creating 98-swap-disabled.conf with NoSwap") framework.Logf("Creating file: %s", file98) - framework.Logf("Content:\n%s", loadConfigFromFile(cnvNoSwapConfigPath)) - err := createDropInFile(oc, cnvWorkerNode, file98, 
loadConfigFromFile(cnvNoSwapConfigPath)) + framework.Logf("Content:\n%s", LoadConfigFromFile(cnvNoSwapConfigPath)) + err := CreateDropInFile(oc, cnvWorkerNode, file98, LoadConfigFromFile(cnvNoSwapConfigPath)) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Created: %s (NoSwap)", file98) g.By("Creating 99-swap-limited.conf with LimitedSwap") framework.Logf("Creating file: %s", file99) - framework.Logf("Content:\n%s", loadConfigFromFile(cnvLimitedSwapConfigPath)) - err = createDropInFile(oc, cnvWorkerNode, file99, loadConfigFromFile(cnvLimitedSwapConfigPath)) + framework.Logf("Content:\n%s", LoadConfigFromFile(cnvLimitedSwapConfigPath)) + err = CreateDropInFile(oc, cnvWorkerNode, file99, LoadConfigFromFile(cnvLimitedSwapConfigPath)) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Created: %s (LimitedSwap)", file99) @@ -700,26 +700,26 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr defer func() { g.By("Cleaning up multiple config files") framework.Logf("Removing: %s", file98) - removeDropInFile(oc, cnvWorkerNode, file98) + RemoveDropInFile(oc, cnvWorkerNode, file98) framework.Logf("Removing: %s", file99) - removeDropInFile(oc, cnvWorkerNode, file99) + RemoveDropInFile(oc, cnvWorkerNode, file99) framework.Logf("Running: systemctl restart kubelet") - restartKubeletOnNode(ctx, oc, cnvWorkerNode) - waitForNodeToBeReady(ctx, oc, cnvWorkerNode) + RestartKubeletOnNode(ctx, oc, cnvWorkerNode) + WaitForNodeToBeReady(ctx, oc, cnvWorkerNode) framework.Logf("Cleanup completed") }() g.By("Restarting kubelet") framework.Logf("Running: systemctl restart kubelet") - err = restartKubeletOnNode(ctx, oc, cnvWorkerNode) + err = RestartKubeletOnNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Waiting for node to be ready...") - waitForNodeToBeReady(ctx, oc, cnvWorkerNode) + WaitForNodeToBeReady(ctx, oc, cnvWorkerNode) framework.Logf("Node %s is Ready", cnvWorkerNode) g.By("Verifying 99-* file takes 
precedence (lexicographic order)") framework.Logf("Running: oc get --raw \"/api/v1/nodes/%s/proxy/configz\" | jq '.kubeletconfig.memorySwap'", cnvWorkerNode) - config, err := getKubeletConfigFromNode(ctx, oc, cnvWorkerNode) + config, err := GetKubeletConfigFromNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Kubelet swapBehavior: '%s'", config.MemorySwap.SwapBehavior) o.Expect(config.MemorySwap.SwapBehavior).To(o.Equal("LimitedSwap"), @@ -740,7 +740,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.By("Getting all CNV worker nodes") // Get nodes with both worker role and CNV schedulable label - allWorkerNodes, err := getNodesByLabel(ctx, oc, "node-role.kubernetes.io/worker") + allWorkerNodes, err := GetNodesByLabel(ctx, oc, "node-role.kubernetes.io/worker") o.Expect(err).NotTo(o.HaveOccurred()) var cnvNodes []string @@ -762,10 +762,10 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.By("Deploying drop-in configuration to all CNV nodes") framework.Logf("Drop-in file: %s", cnvDropInFilePath) - framework.Logf("Content:\n%s", loadConfigFromFile(cnvLimitedSwapConfigPath)) + framework.Logf("Content:\n%s", LoadConfigFromFile(cnvLimitedSwapConfigPath)) for _, node := range cnvNodes { framework.Logf("Creating drop-in file on node: %s", node) - err := createDropInFile(oc, node, cnvDropInFilePath, loadConfigFromFile(cnvLimitedSwapConfigPath)) + err := CreateDropInFile(oc, node, cnvDropInFilePath, LoadConfigFromFile(cnvLimitedSwapConfigPath)) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf(" -> Created successfully on %s", node) } @@ -774,13 +774,13 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.By("Cleaning up all CNV nodes") for _, node := range cnvNodes { framework.Logf("Removing drop-in file from node: %s", node) - removeDropInFile(oc, node, cnvDropInFilePath) + RemoveDropInFile(oc, node, cnvDropInFilePath) 
framework.Logf("Restarting kubelet on node: %s", node) - restartKubeletOnNode(ctx, oc, node) + RestartKubeletOnNode(ctx, oc, node) } for _, node := range cnvNodes { framework.Logf("Waiting for node %s to be ready...", node) - waitForNodeToBeReady(ctx, oc, node) + WaitForNodeToBeReady(ctx, oc, node) } framework.Logf("Cleanup completed on all %d CNV nodes", len(cnvNodes)) }() @@ -813,21 +813,21 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.By("Restarting kubelet on all CNV nodes") for _, node := range cnvNodes { framework.Logf("Running: systemctl restart kubelet on node %s", node) - err := restartKubeletOnNode(ctx, oc, node) + err := RestartKubeletOnNode(ctx, oc, node) o.Expect(err).NotTo(o.HaveOccurred()) } g.By("Waiting for all nodes to be ready") for _, node := range cnvNodes { framework.Logf("Waiting for node %s to be Ready...", node) - waitForNodeToBeReady(ctx, oc, node) + WaitForNodeToBeReady(ctx, oc, node) framework.Logf("Node %s is Ready", node) } g.By("Verifying consistent swapBehavior across all CNV nodes") framework.Logf("Running: oc get --raw \"/api/v1/nodes//proxy/configz\" | jq '.kubeletconfig.memorySwap' for each node") for _, node := range cnvNodes { - config, err := getKubeletConfigFromNode(ctx, oc, node) + config, err := GetKubeletConfigFromNode(ctx, oc, node) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("✅ Node %s: swapBehavior = '%s'", node, config.MemorySwap.SwapBehavior) o.Expect(config.MemorySwap.SwapBehavior).To(o.Equal("LimitedSwap"), @@ -838,7 +838,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr for _, node := range cnvNodes { nodeObj, err := oc.AdminKubeClient().CoreV1().Nodes().Get(ctx, node, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(isNodeInReadyState(nodeObj)).To(o.BeTrue(), "Node %s should be Ready", node) + o.Expect(IsNodeInReadyState(nodeObj)).To(o.BeTrue(), "Node %s should be Ready", node) framework.Logf("Node %s status: 
Ready", node) } @@ -865,7 +865,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.By("Verifying swapBehavior consistency after wait period") for _, node := range cnvNodes { - config, err := getKubeletConfigFromNode(ctx, oc, node) + config, err := GetKubeletConfigFromNode(ctx, oc, node) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Node %s (after wait): swapBehavior = '%s'", node, config.MemorySwap.SwapBehavior) o.Expect(config.MemorySwap.SwapBehavior).To(o.Equal("LimitedSwap"), @@ -888,7 +888,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr framework.Logf("=== TC12: Testing LimitedSwap config when OS swap is disabled ===") // Get a CNV worker node for tests - cnvWorkerNode = getCNVWorkerNodeName(ctx, oc) + cnvWorkerNode = GetCNVWorkerNodeName(ctx, oc) o.Expect(cnvWorkerNode).NotTo(o.BeEmpty(), "No CNV worker nodes available") framework.Logf("Executing on node: %s", cnvWorkerNode) @@ -932,41 +932,41 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.By("Creating LimitedSwap drop-in configuration") framework.Logf("Creating drop-in file: %s", cnvDropInFilePath) - framework.Logf("Content:\n%s", loadConfigFromFile(cnvLimitedSwapConfigPath)) - err = createDropInFile(oc, cnvWorkerNode, cnvDropInFilePath, loadConfigFromFile(cnvLimitedSwapConfigPath)) + framework.Logf("Content:\n%s", LoadConfigFromFile(cnvLimitedSwapConfigPath)) + err = CreateDropInFile(oc, cnvWorkerNode, cnvDropInFilePath, LoadConfigFromFile(cnvLimitedSwapConfigPath)) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Drop-in file created successfully") defer func() { g.By("Cleaning up") framework.Logf("Removing drop-in file: %s", cnvDropInFilePath) - removeDropInFile(oc, cnvWorkerNode, cnvDropInFilePath) + RemoveDropInFile(oc, cnvWorkerNode, cnvDropInFilePath) // Re-enable swap if it was initially present if initialHasSwap { framework.Logf("Note: OS swap was initially enabled, may need 
manual re-enable") } framework.Logf("Restarting kubelet on node: %s", cnvWorkerNode) - restartKubeletOnNode(ctx, oc, cnvWorkerNode) - waitForNodeToBeReady(ctx, oc, cnvWorkerNode) + RestartKubeletOnNode(ctx, oc, cnvWorkerNode) + WaitForNodeToBeReady(ctx, oc, cnvWorkerNode) }() g.By("Restarting kubelet with LimitedSwap config but no OS swap") framework.Logf("Running: systemctl restart kubelet on node %s", cnvWorkerNode) - err = restartKubeletOnNode(ctx, oc, cnvWorkerNode) + err = RestartKubeletOnNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Waiting for node to be ready...") - waitForNodeToBeReady(ctx, oc, cnvWorkerNode) + WaitForNodeToBeReady(ctx, oc, cnvWorkerNode) framework.Logf("Node %s is Ready", cnvWorkerNode) g.By("Verifying node status is Ready (no crash or failure)") node, err := oc.AdminKubeClient().CoreV1().Nodes().Get(ctx, cnvWorkerNode, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(isNodeInReadyState(node)).To(o.BeTrue(), "Node should remain Ready even with LimitedSwap but no OS swap") + o.Expect(IsNodeInReadyState(node)).To(o.BeTrue(), "Node should remain Ready even with LimitedSwap but no OS swap") framework.Logf("Node %s status: Ready (no crash)", cnvWorkerNode) g.By("Verifying kubelet loaded LimitedSwap configuration") framework.Logf("Running: oc get --raw \"/api/v1/nodes/%s/proxy/configz\" | jq '.kubeletconfig.memorySwap'", cnvWorkerNode) - config, err := getKubeletConfigFromNode(ctx, oc, cnvWorkerNode) + config, err := GetKubeletConfigFromNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Kubelet swapBehavior: '%s'", config.MemorySwap.SwapBehavior) o.Expect(config.MemorySwap.SwapBehavior).To(o.Equal("LimitedSwap"), @@ -1049,7 +1049,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.It("TC13: should work correctly with various swap sizes", func(ctx context.Context) { skipOnSingleNodeTopology(oc) //skip this test for SNO // 
Get a CNV worker node for tests - cnvWorkerNode = getCNVWorkerNodeName(ctx, oc) + cnvWorkerNode = GetCNVWorkerNodeName(ctx, oc) o.Expect(cnvWorkerNode).NotTo(o.BeEmpty(), "No CNV worker nodes available") framework.Logf("=== TC13: Testing LimitedSwap with various swap sizes ===") @@ -1069,8 +1069,8 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.By("Creating LimitedSwap drop-in configuration") framework.Logf("Creating drop-in file: %s", cnvDropInFilePath) - framework.Logf("Content:\n%s", loadConfigFromFile(cnvLimitedSwapConfigPath)) - err := createDropInFile(oc, cnvWorkerNode, cnvDropInFilePath, loadConfigFromFile(cnvLimitedSwapConfigPath)) + framework.Logf("Content:\n%s", LoadConfigFromFile(cnvLimitedSwapConfigPath)) + err := CreateDropInFile(oc, cnvWorkerNode, cnvDropInFilePath, LoadConfigFromFile(cnvLimitedSwapConfigPath)) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Drop-in file created successfully") @@ -1082,10 +1082,10 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr ExecOnNodeWithChroot(oc, cnvWorkerNode, "rm", "-f", swapFilePath) // Remove drop-in config framework.Logf("Removing drop-in file: %s", cnvDropInFilePath) - removeDropInFile(oc, cnvWorkerNode, cnvDropInFilePath) + RemoveDropInFile(oc, cnvWorkerNode, cnvDropInFilePath) framework.Logf("Restarting kubelet") - restartKubeletOnNode(ctx, oc, cnvWorkerNode) - waitForNodeToBeReady(ctx, oc, cnvWorkerNode) + RestartKubeletOnNode(ctx, oc, cnvWorkerNode) + WaitForNodeToBeReady(ctx, oc, cnvWorkerNode) framework.Logf("Final cleanup completed") }() @@ -1153,18 +1153,18 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.By(fmt.Sprintf("Restarting kubelet with %s swap", swapSize.name)) framework.Logf("Running: systemctl restart kubelet") - err = restartKubeletOnNode(ctx, oc, cnvWorkerNode) + err = RestartKubeletOnNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) - 
waitForNodeToBeReady(ctx, oc, cnvWorkerNode) + WaitForNodeToBeReady(ctx, oc, cnvWorkerNode) g.By(fmt.Sprintf("Verifying node status with %s swap", swapSize.name)) node, err := oc.AdminKubeClient().CoreV1().Nodes().Get(ctx, cnvWorkerNode, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) - result.nodeReady = isNodeInReadyState(node) + result.nodeReady = IsNodeInReadyState(node) framework.Logf("Node %s status: Ready=%v", cnvWorkerNode, result.nodeReady) g.By(fmt.Sprintf("Verifying kubelet config with %s swap", swapSize.name)) - config, err := getKubeletConfigFromNode(ctx, oc, cnvWorkerNode) + config, err := GetKubeletConfigFromNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) result.configOK = config.MemorySwap.SwapBehavior == "LimitedSwap" framework.Logf("Kubelet swapBehavior: '%s' (expected: LimitedSwap)", config.MemorySwap.SwapBehavior) @@ -1225,7 +1225,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.It("TC14: should expose swap metrics correctly via Prometheus", func(ctx context.Context) { skipOnSingleNodeTopology(oc) //skip this test for SNO // Get a CNV worker node for tests - cnvWorkerNode = getCNVWorkerNodeName(ctx, oc) + cnvWorkerNode = GetCNVWorkerNodeName(ctx, oc) o.Expect(cnvWorkerNode).NotTo(o.BeEmpty(), "No CNV worker nodes available") framework.Logf("=== TC14: Testing swap metrics and observability via Prometheus ===") @@ -1284,27 +1284,27 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr framework.Logf("Removing swap file: rm -f %s", swapFilePath) ExecOnNodeWithChroot(oc, cnvWorkerNode, "rm", "-f", swapFilePath) } - cleanupDropInAndRestartKubelet(ctx, oc, cnvWorkerNode, cnvDropInFilePath) + CleanupDropInAndRestartKubelet(ctx, oc, cnvWorkerNode, cnvDropInFilePath) }() g.By("Creating LimitedSwap configuration") framework.Logf("Creating drop-in file: %s", cnvDropInFilePath) - framework.Logf("Content:\n%s", loadConfigFromFile(cnvLimitedSwapConfigPath)) - 
err := createDropInFile(oc, cnvWorkerNode, cnvDropInFilePath, loadConfigFromFile(cnvLimitedSwapConfigPath)) + framework.Logf("Content:\n%s", LoadConfigFromFile(cnvLimitedSwapConfigPath)) + err := CreateDropInFile(oc, cnvWorkerNode, cnvDropInFilePath, LoadConfigFromFile(cnvLimitedSwapConfigPath)) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Drop-in file created successfully") g.By("Restarting kubelet") framework.Logf("Running: systemctl restart kubelet") - err = restartKubeletOnNode(ctx, oc, cnvWorkerNode) + err = RestartKubeletOnNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Waiting for node to be ready...") - waitForNodeToBeReady(ctx, oc, cnvWorkerNode) + WaitForNodeToBeReady(ctx, oc, cnvWorkerNode) framework.Logf("Node %s is Ready", cnvWorkerNode) g.By("Verifying kubelet LimitedSwap configuration") framework.Logf("Running: oc get --raw \"/api/v1/nodes/%s/proxy/configz\" | jq '.kubeletconfig.memorySwap'", cnvWorkerNode) - config, err := getKubeletConfigFromNode(ctx, oc, cnvWorkerNode) + config, err := GetKubeletConfigFromNode(ctx, oc, cnvWorkerNode) o.Expect(err).NotTo(o.HaveOccurred()) framework.Logf("Kubelet swapBehavior: '%s'", config.MemorySwap.SwapBehavior) @@ -1431,7 +1431,7 @@ var _ = g.Describe("[Jira:Node/Kubelet][sig-node][Feature:NodeSwap][Serial][Disr g.By("Verifying node remains Ready with metrics collection active") node, err := oc.AdminKubeClient().CoreV1().Nodes().Get(ctx, cnvWorkerNode, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(isNodeInReadyState(node)).To(o.BeTrue(), "Node should remain Ready") + o.Expect(IsNodeInReadyState(node)).To(o.BeTrue(), "Node should remain Ready") framework.Logf("Node %s is Ready", cnvWorkerNode) osSwapStatus := "enabled" diff --git a/test/extended/node/node_utils.go b/test/extended/node/node_utils.go index 8af84984b0d9..1c3a62dbd8e0 100644 --- a/test/extended/node/node_utils.go +++ b/test/extended/node/node_utils.go @@ -1,3 +1,4 @@ +// Package node 
provides utility functions for OpenShift node E2E tests. See CLAUDE.md for usage guide. package node import ( @@ -26,7 +27,7 @@ import ( ) // getNodesByLabel returns nodes matching the specified label selector -func getNodesByLabel(ctx context.Context, oc *exutil.CLI, labelSelector string) ([]corev1.Node, error) { +func GetNodesByLabel(ctx context.Context, oc *exutil.CLI, labelSelector string) ([]corev1.Node, error) { nodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(ctx, metav1.ListOptions{ LabelSelector: labelSelector, }) @@ -37,9 +38,9 @@ func getNodesByLabel(ctx context.Context, oc *exutil.CLI, labelSelector string) } // getControlPlaneNodes returns all control plane nodes in the cluster -func getControlPlaneNodes(ctx context.Context, oc *exutil.CLI) ([]corev1.Node, error) { +func GetControlPlaneNodes(ctx context.Context, oc *exutil.CLI) ([]corev1.Node, error) { // Try master label first (OpenShift uses this) - nodes, err := getNodesByLabel(ctx, oc, "node-role.kubernetes.io/master") + nodes, err := GetNodesByLabel(ctx, oc, "node-role.kubernetes.io/master") if err != nil { return nil, err } @@ -48,11 +49,11 @@ func getControlPlaneNodes(ctx context.Context, oc *exutil.CLI) ([]corev1.Node, e } // Fallback to control-plane label (upstream Kubernetes uses this) - return getNodesByLabel(ctx, oc, "node-role.kubernetes.io/control-plane") + return GetNodesByLabel(ctx, oc, "node-role.kubernetes.io/control-plane") } // getKubeletConfigFromNode retrieves the kubelet configuration from a specific node -func getKubeletConfigFromNode(ctx context.Context, oc *exutil.CLI, nodeName string) (*kubeletconfigv1beta1.KubeletConfiguration, error) { +func GetKubeletConfigFromNode(ctx context.Context, oc *exutil.CLI, nodeName string) (*kubeletconfigv1beta1.KubeletConfiguration, error) { // Use the node proxy API to get configz configzPath := fmt.Sprintf("/api/v1/nodes/%s/proxy/configz", nodeName) @@ -80,7 +81,7 @@ func getKubeletConfigFromNode(ctx context.Context, oc *exutil.CLI, 
nodeName stri // getPureWorkerNodes returns worker nodes that are not also control plane nodes. // On SNO clusters, the single node has both worker and control-plane roles, // so it should be validated as a control plane node (failSwapOn=true), not as a worker. -func getPureWorkerNodes(nodes []corev1.Node) []corev1.Node { +func GetPureWorkerNodes(nodes []corev1.Node) []corev1.Node { var pureWorkers []corev1.Node for _, node := range nodes { _, hasControlPlane := node.Labels["node-role.kubernetes.io/control-plane"] @@ -139,16 +140,16 @@ var ( ) // getCNVWorkerNodeName returns the name of a worker node with CNV label (kubevirt.io/schedulable=true) -func getCNVWorkerNodeName(ctx context.Context, oc *exutil.CLI) string { +func GetCNVWorkerNodeName(ctx context.Context, oc *exutil.CLI) string { // First try to get nodes with CNV schedulable label - nodes, err := getNodesByLabel(ctx, oc, "kubevirt.io/schedulable=true") + nodes, err := GetNodesByLabel(ctx, oc, "kubevirt.io/schedulable=true") if err == nil && len(nodes) > 0 { // Randomly select a node from the available CNV nodes return nodes[rand.Intn(len(nodes))].Name } // Fallback to any worker node - nodes, err = getNodesByLabel(ctx, oc, "node-role.kubernetes.io/worker") + nodes, err = GetNodesByLabel(ctx, oc, "node-role.kubernetes.io/worker") if err != nil || len(nodes) == 0 { return "" } @@ -173,7 +174,7 @@ func ExecOnNodeWithNsenter(oc *exutil.CLI, nodeName string, cmd ...string) (stri } // createDropInFile creates a drop-in configuration file on the specified node -func createDropInFile(oc *exutil.CLI, nodeName, filePath, content string) error { +func CreateDropInFile(oc *exutil.CLI, nodeName, filePath, content string) error { // Escape content for shell escapedContent := strings.ReplaceAll(content, "'", "'\\''") cmd := fmt.Sprintf("echo '%s' > %s && chmod 644 %s", escapedContent, filePath, filePath) @@ -182,13 +183,13 @@ func createDropInFile(oc *exutil.CLI, nodeName, filePath, content string) error } // 
removeDropInFile removes a drop-in configuration file from the specified node -func removeDropInFile(oc *exutil.CLI, nodeName, filePath string) error { +func RemoveDropInFile(oc *exutil.CLI, nodeName, filePath string) error { _, err := ExecOnNodeWithChroot(oc, nodeName, "rm", "-f", filePath) return err } // loadConfigFromFile reads kubelet configuration from a YAML file -func loadConfigFromFile(path string) string { +func LoadConfigFromFile(path string) string { data, err := os.ReadFile(path) if err != nil { framework.Failf("Failed to read config file %s: %v", path, err) @@ -198,7 +199,7 @@ func loadConfigFromFile(path string) string { // restartKubeletOnNode restarts the kubelet service on the specified node // Retries on transient network errors which are common on real clusters -func restartKubeletOnNode(ctx context.Context, oc *exutil.CLI, nodeName string) error { +func RestartKubeletOnNode(ctx context.Context, oc *exutil.CLI, nodeName string) error { const maxAttempts = 3 var lastErr error for attempt := 0; attempt < maxAttempts; attempt++ { @@ -248,18 +249,18 @@ func isTransientNetworkError(err error) bool { } // waitForNodeToBeReady waits for a node to become Ready -func waitForNodeToBeReady(ctx context.Context, oc *exutil.CLI, nodeName string) { +func WaitForNodeToBeReady(ctx context.Context, oc *exutil.CLI, nodeName string) { o.Eventually(func() bool { node, err := oc.AdminKubeClient().CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { return false } - return isNodeInReadyState(node) + return IsNodeInReadyState(node) }, 5*time.Minute, 10*time.Second).Should(o.BeTrue(), "Node %s should become Ready", nodeName) } // isNodeInReadyState checks if a node is in Ready condition -func isNodeInReadyState(node *corev1.Node) bool { +func IsNodeInReadyState(node *corev1.Node) bool { for _, condition := range node.Status.Conditions { if condition.Type == corev1.NodeReady && condition.Status == corev1.ConditionTrue { return true @@ -269,13 +270,13 
@@ func isNodeInReadyState(node *corev1.Node) bool { } // cleanupDropInAndRestartKubelet removes the drop-in file and restarts kubelet -func cleanupDropInAndRestartKubelet(ctx context.Context, oc *exutil.CLI, nodeName, filePath string) { +func CleanupDropInAndRestartKubelet(ctx context.Context, oc *exutil.CLI, nodeName, filePath string) { framework.Logf("Removing drop-in file: %s", filePath) - removeDropInFile(oc, nodeName, filePath) + RemoveDropInFile(oc, nodeName, filePath) framework.Logf("Restarting kubelet on node: %s", nodeName) - restartKubeletOnNode(ctx, oc, nodeName) + RestartKubeletOnNode(ctx, oc, nodeName) framework.Logf("Waiting for node to be ready...") - waitForNodeToBeReady(ctx, oc, nodeName) + WaitForNodeToBeReady(ctx, oc, nodeName) } // ============================================================================ @@ -283,7 +284,7 @@ func cleanupDropInAndRestartKubelet(ctx context.Context, oc *exutil.CLI, nodeNam // ============================================================================ // isCNVInstalled checks if CNV operator is installed -func isCNVInstalled(ctx context.Context, oc *exutil.CLI) bool { +func IsCNVInstalled(ctx context.Context, oc *exutil.CLI) bool { // Check if CNV namespace exists _, err := oc.AdminKubeClient().CoreV1().Namespaces().Get(ctx, cnvNamespace, metav1.GetOptions{}) if err != nil { @@ -297,7 +298,7 @@ func isCNVInstalled(ctx context.Context, oc *exutil.CLI) bool { } // installCNVOperator installs the CNV operator and creates HyperConverged CR -func installCNVOperator(ctx context.Context, oc *exutil.CLI) error { +func InstallCNVOperator(ctx context.Context, oc *exutil.CLI) error { framework.Logf("Installing CNV operator...") dynamicClient := oc.AdminDynamicClient() @@ -430,7 +431,7 @@ func installCNVOperator(ctx context.Context, oc *exutil.CLI) error { // Step 7: Label worker nodes for CNV framework.Logf("Labeling worker nodes for CNV...") - err = labelWorkerNodesForCNV(ctx, oc) + err = LabelWorkerNodesForCNV(ctx, oc) 
if err != nil { framework.Logf("Warning: failed to label nodes for CNV: %v", err) } @@ -442,7 +443,7 @@ func installCNVOperator(ctx context.Context, oc *exutil.CLI) error { return fmt.Errorf("failed to create MC client for MCP check: %w", err) } - err = waitForMCP(ctx, mcClient, "worker", 30*time.Minute) + err = WaitForMCP(ctx, mcClient, "worker", 30*time.Minute) if err != nil { return fmt.Errorf("MCP rollout failed after CNV installation: %w", err) } @@ -518,7 +519,7 @@ func waitForHyperConvergedReady(ctx context.Context, oc *exutil.CLI) error { // waitForMCP waits for a MachineConfigPool to be ready (not updating, updated, and all machines ready) // Returns error immediately if the MCP becomes degraded -func waitForMCP(ctx context.Context, mcClient *machineconfigclient.Clientset, poolName string, timeout time.Duration) error { +func WaitForMCP(ctx context.Context, mcClient *machineconfigclient.Clientset, poolName string, timeout time.Duration) error { framework.Logf("Waiting for MCP %s to be ready (timeout: %v)...", poolName, timeout) return wait.PollUntilContextTimeout(ctx, 10*time.Second, timeout, true, func(ctx context.Context) (bool, error) { @@ -571,7 +572,7 @@ func waitForMCP(ctx context.Context, mcClient *machineconfigclient.Clientset, po // getWorkerGeneratedKubeletMC finds and returns the highest numbered worker-generated-kubelet MachineConfig. // KubeletConfig changes affect the highest numbered config, so we return that one. 
-func getWorkerGeneratedKubeletMC(ctx context.Context, mcClient *machineconfigclient.Clientset) (*machineconfigv1.MachineConfig, error) { +func GetWorkerGeneratedKubeletMC(ctx context.Context, mcClient *machineconfigclient.Clientset) (*machineconfigv1.MachineConfig, error) { mcList, err := mcClient.MachineconfigurationV1().MachineConfigs().List(ctx, metav1.ListOptions{}) if err != nil { return nil, err @@ -594,10 +595,10 @@ func getWorkerGeneratedKubeletMC(ctx context.Context, mcClient *machineconfigcli } // labelWorkerNodesForCNV labels all worker nodes with kubevirt.io/schedulable=true -func labelWorkerNodesForCNV(ctx context.Context, oc *exutil.CLI) error { +func LabelWorkerNodesForCNV(ctx context.Context, oc *exutil.CLI) error { framework.Logf("Labeling worker nodes for CNV...") - nodes, err := getNodesByLabel(ctx, oc, "node-role.kubernetes.io/worker") + nodes, err := GetNodesByLabel(ctx, oc, "node-role.kubernetes.io/worker") if err != nil { return fmt.Errorf("failed to get worker nodes: %w", err) } @@ -619,10 +620,10 @@ func labelWorkerNodesForCNV(ctx context.Context, oc *exutil.CLI) error { } // unlabelWorkerNodesForCNV removes the kubevirt.io/schedulable label from worker nodes -func unlabelWorkerNodesForCNV(ctx context.Context, oc *exutil.CLI) error { +func UnlabelWorkerNodesForCNV(ctx context.Context, oc *exutil.CLI) error { framework.Logf("Removing CNV labels from worker nodes...") - nodes, err := getNodesByLabel(ctx, oc, cnvNodeLabel+"=true") + nodes, err := GetNodesByLabel(ctx, oc, cnvNodeLabel+"=true") if err != nil { return fmt.Errorf("failed to get CNV-labeled nodes: %w", err) } @@ -641,7 +642,7 @@ func unlabelWorkerNodesForCNV(ctx context.Context, oc *exutil.CLI) error { } // uninstallCNVOperator uninstalls the CNV operator and all related resources -func uninstallCNVOperator(ctx context.Context, oc *exutil.CLI) error { +func UninstallCNVOperator(ctx context.Context, oc *exutil.CLI) error { framework.Logf("Uninstalling CNV operator...") 
dynamicClient := oc.AdminDynamicClient() @@ -689,7 +690,7 @@ func uninstallCNVOperator(ctx context.Context, oc *exutil.CLI) error { // Step 5: Remove node labels framework.Logf("Removing CNV node labels...") - _ = unlabelWorkerNodesForCNV(ctx, oc) + _ = UnlabelWorkerNodesForCNV(ctx, oc) // Step 6: Delete namespace framework.Logf("Deleting namespace %s", cnvNamespace) @@ -715,7 +716,7 @@ func uninstallCNVOperator(ctx context.Context, oc *exutil.CLI) error { if err != nil { framework.Logf("Warning: failed to create MC client for MCP check: %v", err) } else { - err = waitForMCP(ctx, mcClient, "worker", 30*time.Minute) + err = WaitForMCP(ctx, mcClient, "worker", 30*time.Minute) if err != nil { framework.Logf("Warning: MCP rollout check failed: %v", err) } @@ -726,8 +727,8 @@ func uninstallCNVOperator(ctx context.Context, oc *exutil.CLI) error { } // ensureDropInDirectoryExists creates the drop-in directory on worker nodes if it doesn't exist -func ensureDropInDirectoryExists(ctx context.Context, oc *exutil.CLI, dirPath string) error { - nodes, err := getNodesByLabel(ctx, oc, "node-role.kubernetes.io/worker") +func EnsureDropInDirectoryExists(ctx context.Context, oc *exutil.CLI, dirPath string) error { + nodes, err := GetNodesByLabel(ctx, oc, "node-role.kubernetes.io/worker") if err != nil { return fmt.Errorf("failed to get worker nodes: %w", err) } diff --git a/test/extended/node/system_compressible.go b/test/extended/node/system_compressible.go index 6eb0dcc6351d..76741377bd3c 100644 --- a/test/extended/node/system_compressible.go +++ b/test/extended/node/system_compressible.go @@ -43,7 +43,7 @@ var _ = g.Describe("[Suite:openshift/disruptive-longrunning][sig-node][Disruptiv framework.Logf("Testing on node: %s with %d CPUs", nodeName, cpuCount) // Get kubelet config and verify system compressible is enabled - config, err := getKubeletConfigFromNode(ctx, oc, nodeName) + config, err := GetKubeletConfigFromNode(ctx, oc, nodeName) o.Expect(err).NotTo(o.HaveOccurred(), 
"Should be able to read kubelet config") // Skip if reserved CPU is enabled @@ -154,7 +154,7 @@ var _ = g.Describe("[Suite:openshift/disruptive-longrunning][sig-node][Disruptiv // Wait for worker MCP to stabilize after custom MCP deletion g.By("Waiting for worker MCP to stabilize after custom MCP deletion") - waitErr := waitForMCP(cleanupCtx, mcClient, "worker", 10*time.Minute) + waitErr := WaitForMCP(cleanupCtx, mcClient, "worker", 10*time.Minute) if apierrors.IsNotFound(waitErr) { // MachineConfigPool already deleted, nothing to wait for } else if waitErr != nil { @@ -208,7 +208,7 @@ var _ = g.Describe("[Suite:openshift/disruptive-longrunning][sig-node][Disruptiv // Wait for MCP ready g.By("Waiting for custom MachineConfigPool to be ready") - err = waitForMCP(ctx, mcClient, testMCPName, 5*time.Minute) + err = WaitForMCP(ctx, mcClient, testMCPName, 5*time.Minute) o.Expect(err).NotTo(o.HaveOccurred(), "MCP should be ready") // Create KubeletConfig to disable system compressible @@ -254,11 +254,11 @@ var _ = g.Describe("[Suite:openshift/disruptive-longrunning][sig-node][Disruptiv // Wait for MCP to apply configuration g.By("Waiting for MCP to update with new configuration") - err = waitForMCP(ctx, mcClient, testMCPName, 15*time.Minute) + err = WaitForMCP(ctx, mcClient, testMCPName, 15*time.Minute) o.Expect(err).NotTo(o.HaveOccurred(), "MCP should update successfully") // Verify system compressible is disabled - config, err := getKubeletConfigFromNode(ctx, oc, nodeName) + config, err := GetKubeletConfigFromNode(ctx, oc, nodeName) o.Expect(err).NotTo(o.HaveOccurred(), "Should be able to read kubelet config") o.Expect(isSystemCompressibleEnabled(config)).To(o.BeFalse(), "System compressible should be disabled") @@ -347,7 +347,7 @@ var _ = g.Describe("[Suite:openshift/disruptive-longrunning][sig-node][Disruptiv // Wait for worker MCP to stabilize after custom MCP deletion g.By("Waiting for worker MCP to stabilize after custom MCP deletion") - waitErr := 
waitForMCP(cleanupCtx, mcClient, "worker", 10*time.Minute) + waitErr := WaitForMCP(cleanupCtx, mcClient, "worker", 10*time.Minute) if apierrors.IsNotFound(waitErr) { // MachineConfigPool already deleted, nothing to wait for } else if waitErr != nil { @@ -401,7 +401,7 @@ var _ = g.Describe("[Suite:openshift/disruptive-longrunning][sig-node][Disruptiv // Wait for MCP ready g.By("Waiting for custom MachineConfigPool to be ready") - err = waitForMCP(ctx, mcClient, testMCPName, 5*time.Minute) + err = WaitForMCP(ctx, mcClient, testMCPName, 5*time.Minute) o.Expect(err).NotTo(o.HaveOccurred(), "MCP should be ready") // Configure static CPU manager with reserved CPUs @@ -447,11 +447,11 @@ var _ = g.Describe("[Suite:openshift/disruptive-longrunning][sig-node][Disruptiv // Wait for configuration g.By("Waiting for MCP to update with reserved CPU configuration") - err = waitForMCP(ctx, mcClient, testMCPName, 15*time.Minute) + err = WaitForMCP(ctx, mcClient, testMCPName, 15*time.Minute) o.Expect(err).NotTo(o.HaveOccurred(), "MCP should update successfully") // Verify reserved CPU is enabled - config, err := getKubeletConfigFromNode(ctx, oc, nodeName) + config, err := GetKubeletConfigFromNode(ctx, oc, nodeName) o.Expect(err).NotTo(o.HaveOccurred(), "Should be able to read kubelet config") o.Expect(isReservedCPUEnabled(config)).To(o.BeTrue(), "Reserved CPU should be enabled")