diff --git a/.github/actions/run-hostbusters-dualstack-test-suites/action.yaml b/.github/actions/run-hostbusters-dualstack-test-suites/action.yaml index 2707ed021..2926ec298 100644 --- a/.github/actions/run-hostbusters-dualstack-test-suites/action.yaml +++ b/.github/actions/run-hostbusters-dualstack-test-suites/action.yaml @@ -86,14 +86,29 @@ runs: fi gotestsum --format standard-verbose \ - --packages=github.com/rancher/tests/validation/snapshot/dualstack \ + --packages=github.com/rancher/tests/validation/snapshot/rke2/dualstack \ --junitfile results.xml \ - --jsonfile results_snapshot.json \ + --jsonfile results_rke2_snapshot.json \ -- -timeout=5h -tags=recurring -v - snap_restore_exit=$? - echo "snap_restore_exit=$snap_restore_exit" >> "$GITHUB_ENV" - cp results_snapshot.json results.json + rke2_snapshot_exit=$? + echo "rke2_snapshot_exit=$rke2_snapshot_exit" >> "$GITHUB_ENV" + cp results_rke2_snapshot.json results.json + + if [[ "${{ inputs.reporting }}" == "true" ]]; then + ./validation/pipeline/scripts/build_qase_reporter_v2.sh; + ./validation/reporter + fi + + gotestsum --format standard-verbose \ + --packages=github.com/rancher/tests/validation/snapshot/k3s/dualstack \ + --junitfile results.xml \ + --jsonfile results_k3s_snapshot.json \ + -- -timeout=5h -tags=recurring -v + + k3s_snapshot_exit=$? + echo "k3s_snapshot_exit=$k3s_snapshot_exit" >> "$GITHUB_ENV" + cp results_k3s_snapshot.json results.json if [[ "${{ inputs.reporting }}" == "true" ]]; then ./validation/pipeline/scripts/build_qase_reporter_v2.sh; @@ -125,7 +140,8 @@ runs: [delete_exit]="Delete Cluster:results_delete.json" [node_scale_exit]="Node Scaling:results_node_scale.json" [prov_exit]="Provisioning:results_prov.json" - [snap_restore_exit]="Snapshot Restore:results_snapshot.json" + [rke2_snapshot_exit]="RKE2:results_rke2_snapshot.json" + [k3s_snapshot_exit]="K3S:results_k3s_snapshot.json" [upgrade_exit]="Kubernetes Upgrade:results_upgrade.json" ) diff --git a/.github/actions/run-hostbusters-ipv6-test-suites/action.yaml b/.github/actions/run-hostbusters-ipv6-test-suites/action.yaml index b499890b9..dd40484e5 100644 --- a/.github/actions/run-hostbusters-ipv6-test-suites/action.yaml +++ b/.github/actions/run-hostbusters-ipv6-test-suites/action.yaml @@ -86,14 +86,29 @@ runs: fi gotestsum --format standard-verbose \ - --packages=github.com/rancher/tests/validation/snapshot/ipv6 \ + --packages=github.com/rancher/tests/validation/snapshot/rke2/ipv6 \ --junitfile results.xml \ - --jsonfile results_snapshot.json \ + --jsonfile results_rke2_snapshot.json \ -- -timeout=5h -tags=recurring -v - snap_restore_exit=$? - echo "snap_restore_exit=$snap_restore_exit" >> "$GITHUB_ENV" - cp results_snapshot.json results.json + rke2_snapshot_exit=$? + echo "rke2_snapshot_exit=$rke2_snapshot_exit" >> "$GITHUB_ENV" + cp results_rke2_snapshot.json results.json + + if [[ "${{ inputs.reporting }}" == "true" ]]; then + ./validation/pipeline/scripts/build_qase_reporter_v2.sh; + ./validation/reporter + fi + + gotestsum --format standard-verbose \ + --packages=github.com/rancher/tests/validation/snapshot/k3s/ipv6 \ + --junitfile results.xml \ + --jsonfile results_k3s_snapshot.json \ + -- -timeout=5h -tags=recurring -v + + k3s_snapshot_exit=$? 
+ echo "k3s_snapshot_exit=$k3s_snapshot_exit" >> "$GITHUB_ENV" + cp results_k3s_snapshot.json results.json if [[ "${{ inputs.reporting }}" == "true" ]]; then ./validation/pipeline/scripts/build_qase_reporter_v2.sh; @@ -125,7 +140,7 @@ runs: [delete_exit]="Delete Cluster:results_delete.json" [node_scale_exit]="Node Scaling:results_node_scale.json" [prov_exit]="Provisioning:results_prov.json" - [snap_restore_exit]="Snapshot Restore:results_snapshot.json" + [rke2_snapshot_exit]="RKE2:results_rke2_snapshot.json" [upgrade_exit]="Kubernetes Upgrade:results_upgrade.json" ) diff --git a/.github/actions/run-hostbusters-test-suites/action.yaml b/.github/actions/run-hostbusters-test-suites/action.yaml index 930891cb9..5e64639ba 100644 --- a/.github/actions/run-hostbusters-test-suites/action.yaml +++ b/.github/actions/run-hostbusters-test-suites/action.yaml @@ -101,14 +101,29 @@ runs: fi gotestsum --format standard-verbose \ - --packages=github.com/rancher/tests/validation/snapshot/rke2k3s \ + --packages=github.com/rancher/tests/validation/snapshot/rke2 \ --junitfile results.xml \ - --jsonfile results_snapshot.json \ + --jsonfile results_rke2_snapshot.json \ -- -timeout=5h -tags=recurring -v - snapshot_exit=$? - echo "snapshot_exit=$snapshot_exit" >> "$GITHUB_ENV" - cp results_snapshot.json results.json + rke2_snapshot_exit=$? + echo "rke2_snapshot_exit=$rke2_snapshot_exit" >> "$GITHUB_ENV" + cp results_rke2_snapshot.json results.json + + if [[ "${{ inputs.reporting }}" == "true" ]]; then + ./validation/pipeline/scripts/build_qase_reporter_v2.sh; + ./validation/reporter + fi + + gotestsum --format standard-verbose \ + --packages=github.com/rancher/tests/validation/snapshot/k3s \ + --junitfile results.xml \ + --jsonfile results_k3s_snapshot.json \ + -- -timeout=5h -tags=recurring -v + + k3s_snapshot_exit=$? + echo "k3s_snapshot_exit=$k3s_snapshot_exit" >> "$GITHUB_ENV" + cp results_k3s_snapshot.json results.json if [[ "${{ inputs.reporting }}" == "true" ]]; then ./validation/pipeline/scripts/build_qase_reporter_v2.sh; @@ -141,7 +156,8 @@ runs: [nodescaling_exit]="Node Scaling:results_node_scale.json" [k3s_exit]="K3S:results_k3s.json" [rke2_exit]="RKE2:results_rke2.json" - [snapshot_exit]="Snapshot Restore:results_snapshot.json" + [k3s_snapshot_exit]="K3S:results_k3s_snapshot.json" + [rke2_snapshot_exit]="RKE2:results_rke2_snapshot.json" [upgrade_exit]="Kubernetes Upgrade:results_upgrade.json" ) diff --git a/actions/config/defaults/clusterconfig.go b/actions/config/defaults/clusterconfig.go index 2b1607fd6..111de790e 100644 --- a/actions/config/defaults/clusterconfig.go +++ b/actions/config/defaults/clusterconfig.go @@ -8,6 +8,7 @@ import ( const ( ClusterConfigKey = "clusterConfig" + RancherConfigKey = "rancher" AWSEC2Configs = "awsEC2Configs" K8SVersionKey = "kubernetesVersion" CNIKey = "cni" diff --git a/validation/snapshot/README.md b/validation/snapshot/README.md index 8057f9ec0..ee1ff05f5 100644 --- a/validation/snapshot/README.md +++ b/validation/snapshot/README.md @@ -17,132 +17,5 @@ Please see below for more details for your config. Please note that the config c 1. [Getting Started](#Getting-Started) 2. 
[Running Tests](#Running-Tests) -## Getting Started -Please see an example config below using AWS as the node provider to first provision the cluster: - -```yaml -rancher: - host: "" - adminToken: "" - insecure: true - -provisioningInput: - cni: ["calico"] - providers: ["aws"] - nodeProviders: ["ec2"] - -clusterConfig: - cni: "calico" - provider: "aws" - nodeProvider: "ec2" - -awsCredentials: - secretKey: "" - accessKey: "" - defaultRegion: "us-east-2" - -awsMachineConfigs: - region: "us-east-2" - awsMachineConfig: - - roles: ["etcd", "controlplane", "worker"] - ami: "" - instanceType: "" - sshUser: "" - vpcId: "" - volumeType: "" - zone: "a" - retries: "" - rootSize: "" - securityGroup: [""] - -amazonec2Config: - accessKey: "" - ami: "" - blockDurationMinutes: "0" - encryptEbsVolume: false - httpEndpoint: "enabled" - httpTokens: "optional" - iamInstanceProfile: "" - insecureTransport: false - instanceType: "" - monitoring: false - privateAddressOnly: false - region: "us-east-2" - requestSpotInstance: true - retries: "" - rootSize: "" - secretKey: "" - securityGroup: [""] - securityGroupReadonly: false - spotPrice: "" - sshKeyContents: "" - sshUser: "" - subnetId: "" - tags: "" - type: "amazonec2Config" - useEbsOptimizedInstance: false - usePrivateAddress: false - volumeType: "" - vpcId: "" - zone: "a" -``` - -If you plan to run the `snapshot_restore_wins_test.go`, your config must include the following: - -```yaml -awsEC2Configs: - region: "us-east-2" - awsSecretAccessKey: "" - awsAccessKeyID: "" - awsEC2Config: - - instanceType: "" - awsRegionAZ: "" - awsAMI: "" - awsSecurityGroups: [""] - awsSSHKeyName: "" - awsCICDInstanceTag: "" - awsIAMProfile: "" - awsCICDInstanceTag: "" - awsUser: "" - volumeSize: - roles: ["etcd", "controlplane", "worker"] - - instanceType: "" - awsRegionAZ: "" - awsAMI: "" - awsSecurityGroups: [""] - awsSSHKeyName: "" - awsCICDInstanceTag: "" - awsUser: "Administrator" - volumeSize: - roles: ["windows"] -sshPath: - sshPath: "/" -``` - -### Running Tests - -#### RKE1 -`gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/rke1 --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1SnapshotRestoreTestSuite/TestRKE1SnapshotRestore"` - -#### RKE2/K3s -`gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/rke2k3s --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestSnapshotRestoreTestSuite/TestSnapshotRestore"` - -#### IPv6 -`gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/ipv6 --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestSnapshotIPv6RestoreTestSuite/TestSnapshotIPv6Restore"` - -#### Dualstack -`gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/dualstack --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestSnapshotDualstackRestoreTestSuite/TestSnapshotDualstackRestore"` - -#### S3 -`gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/rke1 --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1S3SnapshotRestoreTestSuite/TestRKE1S3SnapshotRestore"` \ -`gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/rke2k3s --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestS3SnapshotRestoreTestSuite/TestS3SnapshotRestore"` - -#### Windows -`gotestsum --format standard-verbose 
--packages=github.com/rancher/tests/validation/snapshot/rke2k3s --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestSnapshotRestoreWindowsTestSuite/TestSnapshotRestoreWindows"`
-
-#### Retention
-`gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/rke2k3s --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestSnapshotRetentionTestSuite/TestAutomaticSnapshotRetention"`
-
-#### Recurring
-`gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/rke1 --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1SnapshotRecurringTestSuite/TestRKE1SnapshotRecurringRestores"` \
-`gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/rke2k3s --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestSnapshotRecurringTestSuite/TestSnapshotRecurringRestores"`
\ No newline at end of file
+## Cluster Configuration
+If the user doesn't provide an existing cluster via the `rancher.clusterName` field, you can find configuration details for node driver/custom clusters here: [provisioning](../provisioning/README.md)
\ No newline at end of file
diff --git a/validation/snapshot/k3s/README.md b/validation/snapshot/k3s/README.md
new file mode 100644
index 000000000..04e9162a5
--- /dev/null
+++ b/validation/snapshot/k3s/README.md
@@ -0,0 +1,179 @@
+# K3S Snapshot Configs
+
+## Table of Contents
+1. [Prerequisites](../README.md)
+2. [Test Cases](#Test-Cases)
+3. [Configurations](#Configurations)
+4. [Configuration Defaults](#defaults)
+5. [Logging Levels](#Logging)
+6. [Back to general snapshot](../README.md)
+
+## Test Cases
+All of the test cases in this package are listed below. Keep in mind that all configurations for these tests have built-in defaults; see [Configuration Defaults](#defaults). These tests will provision a cluster if one is not provided via the `rancher.clusterName` field.
+
+### Recurring Snapshot Test
+
+#### Description:
+The recurring snapshot test verifies that a cluster can create and restore a series of snapshots. None of these configurations are required if an already provisioned cluster is provided to the test.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `K3S_Recurring_Restores`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/k3s --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestSnapshotRecurringTestSuite/TestSnapshotRecurringRestores -timeout=1h -v`
+
+### Snapshot Restore Test
+
+#### Description:
+The snapshot restore test validates that snapshots can be created and restored without any failures or long-term disruption to workloads.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `K3S_Restore_ETCD`
+2. `K3S_Restore_ETCD_K8sVersion`
+3. `K3S_Restore_Upgrade_Strategy`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/k3s --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestSnapshotRestoreTestSuite/TestSnapshotRestore -timeout=1h -v`
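+
+For reference, these three table tests map onto the `etcdsnapshot.Config` values returned by `snapshotRestoreConfigs()` in this package's `snapshot_restore_test.go`:
+
+```go
+func snapshotRestoreConfigs() []*etcdsnapshot.Config {
+	return []*etcdsnapshot.Config{
+		{UpgradeKubernetesVersion: "", SnapshotRestore: "none", RecurringRestores: 1},
+		{UpgradeKubernetesVersion: "", SnapshotRestore: "kubernetesVersion", RecurringRestores: 1},
+		{UpgradeKubernetesVersion: "", SnapshotRestore: "all", ControlPlaneConcurrencyValue: "15%", WorkerConcurrencyValue: "20%", RecurringRestores: 1},
+	}
+}
+```
+
+In Rancher's restore options, `none` restores etcd only, `kubernetesVersion` also rolls back the Kubernetes version, and `all` additionally rolls back cluster configuration such as the upgrade-strategy concurrency values above.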
+
+### Snapshot Retention Test
+
+#### Description:
+The snapshot retention test validates that the configured number of snapshots is retained and that older snapshots are deleted as expected.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `K3S_Retention_Limit`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/k3s --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestSnapshotRetentionTestSuite/TestAutomaticSnapshotRetention -timeout=1h -v`
+
+### Snapshot S3 Test
+
+#### Description:
+The snapshot S3 test validates that snapshots can be stored in and restored from an S3 bucket.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+4. S3 configuration in the etcd section of the cluster config
+
+#### Table Tests:
+1. `K3S_S3_Restore`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/k3s --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestS3SnapshotRestoreTestSuite/TestS3SnapshotRestore -timeout=1h -v`
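+
+A minimal sketch of those S3 settings is shown below; the field names follow the `provisioning.cattle.io` etcd snapshot S3 spec, but the exact keys accepted by your cattle_config and all of the values are placeholders to confirm and fill in for your environment:
+
+```yaml
+clusterConfig:
+  etcd:
+    disableSnapshots: false
+    snapshotScheduleCron: "0 */5 * * *"
+    snapshotRetention: 3
+    s3:
+      bucket: ""                # S3 bucket that stores the snapshots
+      endpoint: "s3.us-east-2.amazonaws.com"
+      cloudCredentialName: ""   # Rancher cloud credential with access to the bucket
+      folder: ""
+      region: "us-east-2"
+      skipSSLVerify: false
+```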
+
+### Dualstack Snapshot Restore Test
+
+#### Description:
+The dualstack snapshot restore test validates that a cluster configured for dualstack networking can create and restore snapshots successfully.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `K3S_Dualstack_Restore_ETCD`
+2. `K3S_Dualstack_Restore_ETCD_K8sVersion`
+3. `K3S_Dualstack_Restore_Upgrade_Strategy`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/k3s/dualstack --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestSnapshotDualstackRestoreTestSuite/TestSnapshotDualstackRestore -timeout=1h -v`
+
+### IPv6 Snapshot Tests
+
+#### Description:
+The IPv6 snapshot tests validate snapshot creation and restore functionality on clusters configured with IPv6 networking.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config) (with IPv6 settings)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `K3S_IPv6_Restore_ETCD`
+2. `K3S_IPv6_Restore_ETCD_K8sVersion`
+3. `K3S_IPv6_Restore_Upgrade_Strategy`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/k3s/ipv6 --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestSnapshotIPv6RestoreTestSuite/TestSnapshotIPv6Restore -timeout=2h -v`
+
+## Configurations
+
+### Existing cluster:
+```yaml
+rancher:
+  host:
+  adminToken:
+  clusterName: ""
+  cleanup: true
+  insecure: true
+```
+
+### Provisioning cluster
+This test will create a cluster if one is not provided. To configure a node driver OR custom cluster, depending on the snapshot test, see [k3s provisioning](../../provisioning/k3s/README.md).
+
+## Defaults
+This package contains a defaults folder with default test configuration data for non-sensitive fields. The goal of this data is to:
+1. Reduce the number of fields the user needs to provide in the cattle_config file.
+2. Reduce the amount of yaml data that needs to be stored in our pipelines.
+3. Make it easier to run tests.
+
+Any data the user provides will override these defaults, which are stored here: [defaults](defaults/defaults.yaml).
+
+## Logging
+This package supports several logging levels. You can set the logging level via the cattle config; the provided level and everything above it will be logged, while all logs below it will be omitted.
+
+```yaml
+logging:
+  level: "trace" #trace, debug, info, warning, error
+```
+
+## Additional
+1. If the tests pass immediately without warning, try adding `-count=1` or running `go clean -cache`. This will prevent previous results from interfering with the new test run.
+2. All of the tests utilize parallelism when running. For more fine-grained control over how things run in parallel, use the `-p` and `-parallel` flags.
\ No newline at end of file
diff --git a/validation/snapshot/dualstack/defaults/defaults.yaml b/validation/snapshot/k3s/dualstack/defaults/defaults.yaml
similarity index 100%
rename from validation/snapshot/dualstack/defaults/defaults.yaml
rename to validation/snapshot/k3s/dualstack/defaults/defaults.yaml
diff --git a/validation/snapshot/k3s/dualstack/schemas/hostbusters_schemas.yaml b/validation/snapshot/k3s/dualstack/schemas/hostbusters_schemas.yaml
new file mode 100644
index 000000000..f537aeed3
--- /dev/null
+++ b/validation/snapshot/k3s/dualstack/schemas/hostbusters_schemas.yaml
@@ -0,0 +1,85 @@
+- suite: Go Automation/Snapshot/k3s/dualstack
+  projects: [RRT, RM]
+  cases:
+    - description: Creates and restores a snapshot on an existing cluster
+      title: K3S_Dualstack_Restore_ETCD
+      priority: 4
+      type: 8
+      is_flaky: 0
+      automation: 2
+      steps:
+        - action: Create a snapshot
+          expectedresult: ""
+          data: ""
+          position: 1
+          attachments: []
+        - action: Restore to the snapshot
+          expectedresult: ""
+          data: ""
+          position: 2
+          attachments: []
+        - action: Verify cluster state
+          expectedresult: ""
+          data: ""
+          position: 3
+          attachments: []
+      custom_field:
+        "14": Validation
+        "18": Hostbusters
+
+    - description: Upgrades the kubernetes version and restores a snapshot
+      title: K3S_Dualstack_Restore_ETCD_K8sVersion
+      priority: 4
+      type: 8
+      is_flaky: 0
+      automation: 2
+      steps:
+        - action: Create a snapshot
+          expectedresult: ""
+          data: ""
+          position: 1
+          attachments: []
+        - action: Upgrade the kubernetes version
+          expectedresult: ""
+          data: ""
+          position: 2
+          attachments: []
+        - action: Restore to the snapshot
+          expectedresult: ""
+          data: ""
+          position: 3
+          attachments: []
+        - action: Verify cluster state
+          expectedresult: ""
+          data: ""
+          position: 4
+          attachments: []
+      custom_field:
+        "14": Validation
+        "18": Hostbusters
+
+    - description: Creates and restores a snapshot with upgrade strategy
+      title: K3S_Dualstack_Restore_Upgrade_Strategy
+      priority: 5
+      type: 8
+      is_flaky: 0
+      automation: 2
+      steps:
+        - action: Create a snapshot
+          expectedresult: ""
+          data: ""
+          position: 1
+          attachments: []
+        - action: Restore the snapshot
+          expectedresult: ""
+          data: ""
+          position: 2
+          attachments: []
+        - action: Verify cluster state
+          expectedresult: ""
+          data: ""
+          position: 3
+          attachments: []
+      custom_field:
+        "14": Validation
+        "18": Hostbusters
\ No newline at end of file
diff --git a/validation/snapshot/k3s/dualstack/snapshot_restore_test.go b/validation/snapshot/k3s/dualstack/snapshot_restore_test.go
new file mode 100644
index 000000000..0fac3323e
--- /dev/null
+++ b/validation/snapshot/k3s/dualstack/snapshot_restore_test.go @@ -0,0 +1,142 @@ +//go:build validation || recurring + +package dualstack + +import ( + "os" + "testing" + + "github.com/rancher/shepherd/clients/rancher" + v1 "github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/pkg/config" + "github.com/rancher/shepherd/pkg/config/operations" + "github.com/rancher/shepherd/pkg/session" + "github.com/rancher/tests/actions/clusters" + "github.com/rancher/tests/actions/config/defaults" + "github.com/rancher/tests/actions/etcdsnapshot" + "github.com/rancher/tests/actions/logging" + "github.com/rancher/tests/actions/provisioning" + "github.com/rancher/tests/actions/qase" + resources "github.com/rancher/tests/validation/provisioning/resources/provisioncluster" + standard "github.com/rancher/tests/validation/provisioning/resources/standarduser" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +const ( + containerImage = "nginx" +) + +type SnapshotDualstackRestoreTestSuite struct { + suite.Suite + session *session.Session + client *rancher.Client + cattleConfig map[string]any + snapshotConfigs []*etcdsnapshot.Config + cluster *v1.SteveAPIObject +} + +func (s *SnapshotDualstackRestoreTestSuite) TearDownSuite() { + s.session.Cleanup() +} + +func (s *SnapshotDualstackRestoreTestSuite) SetupSuite() { + testSession := session.NewSession() + s.session = testSession + + client, err := rancher.NewClient("", s.session) + require.NoError(s.T(), err) + + s.client = client + + standardUserClient, _, _, err := standard.CreateStandardUser(s.client) + require.NoError(s.T(), err) + + s.cattleConfig = config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey)) + + s.cattleConfig, err = defaults.LoadPackageDefaults(s.cattleConfig, "") + require.NoError(s.T(), err) + + loggingConfig := new(logging.Logging) + operations.LoadObjectFromMap(logging.LoggingKey, s.cattleConfig, loggingConfig) + + err = logging.SetLogger(loggingConfig) + require.NoError(s.T(), err) + + clusterConfig := new(clusters.ClusterConfig) + operations.LoadObjectFromMap(defaults.ClusterConfigKey, s.cattleConfig, clusterConfig) + + rancherConfig := new(rancher.Config) + operations.LoadObjectFromMap(defaults.RancherConfigKey, s.cattleConfig, rancherConfig) + + if rancherConfig.ClusterName == "" { + provider := provisioning.CreateProvider(clusterConfig.Provider) + machineConfigSpec := provider.LoadMachineConfigFunc(s.cattleConfig) + + logrus.Info("Provisioning K3S cluster") + s.cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, defaults.K3S, provider, *clusterConfig, machineConfigSpec, nil, true, false) + require.NoError(s.T(), err) + } else { + logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName) + s.cluster, err = client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + } +} + +func snapshotRestoreConfigs() []*etcdsnapshot.Config { + return []*etcdsnapshot.Config{ + { + UpgradeKubernetesVersion: "", + SnapshotRestore: "none", + RecurringRestores: 1, + }, + { + UpgradeKubernetesVersion: "", + SnapshotRestore: "kubernetesVersion", + RecurringRestores: 1, + }, + { + UpgradeKubernetesVersion: "", + SnapshotRestore: "all", + ControlPlaneConcurrencyValue: "15%", + WorkerConcurrencyValue: "20%", + RecurringRestores: 1, + }, + } +} + +func (s *SnapshotDualstackRestoreTestSuite) 
TestSnapshotDualstackRestore() {
+	snapshotRestoreConfig := snapshotRestoreConfigs()
+	tests := []struct {
+		name         string
+		etcdSnapshot *etcdsnapshot.Config
+		cluster      *v1.SteveAPIObject
+	}{
+		{"K3S_Dualstack_Restore_ETCD", snapshotRestoreConfig[0], s.cluster},
+		{"K3S_Dualstack_Restore_ETCD_K8sVersion", snapshotRestoreConfig[1], s.cluster},
+		{"K3S_Dualstack_Restore_Upgrade_Strategy", snapshotRestoreConfig[2], s.cluster},
+	}
+
+	for _, tt := range tests {
+		var err error
+		s.Run(tt.name, func() {
+			cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.cluster.ID)
+			require.NoError(s.T(), err)
+
+			err = etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, containerImage)
+			require.NoError(s.T(), err)
+		})
+
+		params := provisioning.GetProvisioningSchemaParams(s.client, s.cattleConfig)
+		err = qase.UpdateSchemaParameters(tt.name, params)
+		if err != nil {
+			logrus.Warningf("Failed to upload schema parameters %s", err)
+		}
+	}
+}
+
+func TestSnapshotDualstackRestoreTestSuite(t *testing.T) {
+	suite.Run(t, new(SnapshotDualstackRestoreTestSuite))
+}
diff --git a/validation/snapshot/ipv6/defaults/defaults.yaml b/validation/snapshot/k3s/ipv6/defaults/defaults.yaml
similarity index 100%
rename from validation/snapshot/ipv6/defaults/defaults.yaml
rename to validation/snapshot/k3s/ipv6/defaults/defaults.yaml
diff --git a/validation/snapshot/k3s/ipv6/schemas/hostbusters_schemas.yaml b/validation/snapshot/k3s/ipv6/schemas/hostbusters_schemas.yaml
new file mode 100644
index 000000000..2babf3e76
--- /dev/null
+++ b/validation/snapshot/k3s/ipv6/schemas/hostbusters_schemas.yaml
@@ -0,0 +1,85 @@
+- suite: Go Automation/Snapshot/k3s/ipv6
+  projects: [RRT, RM]
+  cases:
+    - description: Creates and restores a snapshot on an existing cluster
+      title: K3S_IPv6_Restore_ETCD
+      priority: 4
+      type: 8
+      is_flaky: 0
+      automation: 2
+      steps:
+        - action: Create a snapshot
+          expectedresult: ""
+          data: ""
+          position: 1
+          attachments: []
+        - action: Restore to the snapshot
+          expectedresult: ""
+          data: ""
+          position: 2
+          attachments: []
+        - action: Verify cluster state
+          expectedresult: ""
+          data: ""
+          position: 3
+          attachments: []
+      custom_field:
+        "14": Validation
+        "18": Hostbusters
+
+    - description: Upgrades the kubernetes version and restores a snapshot
+      title: K3S_IPv6_Restore_ETCD_K8sVersion
+      priority: 4
+      type: 8
+      is_flaky: 0
+      automation: 2
+      steps:
+        - action: Create a snapshot
+          expectedresult: ""
+          data: ""
+          position: 1
+          attachments: []
+        - action: Upgrade the kubernetes version
+          expectedresult: ""
+          data: ""
+          position: 2
+          attachments: []
+        - action: Restore to the snapshot
+          expectedresult: ""
+          data: ""
+          position: 3
+          attachments: []
+        - action: Verify cluster state
+          expectedresult: ""
+          data: ""
+          position: 4
+          attachments: []
+      custom_field:
+        "14": Validation
+        "18": Hostbusters
+
+    - description: Creates and restores a snapshot with upgrade strategy
+      title: K3S_IPv6_Restore_Upgrade_Strategy
+      priority: 5
+      type: 8
+      is_flaky: 0
+      automation: 2
+      steps:
+        - action: Create a snapshot
+          expectedresult: ""
+          data: ""
+          position: 1
+          attachments: []
+        - action: Restore the snapshot
+          expectedresult: ""
+          data: ""
+          position: 2
+          attachments: []
+        - action: Verify cluster state
+          expectedresult: ""
+          data: ""
+          position: 3
+          attachments: []
+      custom_field:
+        "14": Validation
+        "18": Hostbusters
\ No newline at end of file
diff --git a/validation/snapshot/ipv6/snapshot_restore_test.go b/validation/snapshot/k3s/ipv6/snapshot_restore_test.go
similarity index 71% rename from validation/snapshot/ipv6/snapshot_restore_test.go rename to validation/snapshot/k3s/ipv6/snapshot_restore_test.go index d57efa4ce..79f83406d 100644 --- a/validation/snapshot/ipv6/snapshot_restore_test.go +++ b/validation/snapshot/k3s/ipv6/snapshot_restore_test.go @@ -37,8 +37,7 @@ type SnapshotIPv6RestoreTestSuite struct { session *session.Session client *rancher.Client cattleConfig map[string]any - rke2Cluster *v1.SteveAPIObject - k3sCluster *v1.SteveAPIObject + cluster *v1.SteveAPIObject } func (s *SnapshotIPv6RestoreTestSuite) TearDownSuite() { @@ -71,28 +70,33 @@ func (s *SnapshotIPv6RestoreTestSuite) SetupSuite() { clusterConfig := new(clusters.ClusterConfig) operations.LoadObjectFromMap(defaults.ClusterConfigKey, s.cattleConfig, clusterConfig) + rancherConfig := new(rancher.Config) + operations.LoadObjectFromMap(defaults.RancherConfigKey, s.cattleConfig, rancherConfig) + provider := provisioning.CreateProvider(clusterConfig.Provider) machineConfigSpec := provider.LoadMachineConfigFunc(s.cattleConfig) - logrus.Info("Provisioning RKE2 cluster") - s.rke2Cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, false, false) - require.NoError(s.T(), err) - - if clusterConfig.Advanced == nil { - clusterConfig.Advanced = &provisioninginput.Advanced{} - } + if rancherConfig.ClusterName == "" { + if clusterConfig.Advanced == nil { + clusterConfig.Advanced = &provisioninginput.Advanced{} + } - if clusterConfig.Advanced.MachineGlobalConfig == nil { - clusterConfig.Advanced.MachineGlobalConfig = &rkev1.GenericMap{ - Data: map[string]any{}, + if clusterConfig.Advanced.MachineGlobalConfig == nil { + clusterConfig.Advanced.MachineGlobalConfig = &rkev1.GenericMap{ + Data: map[string]any{}, + } } - } - clusterConfig.Advanced.MachineGlobalConfig.Data["flannel-ipv6-masq"] = true + clusterConfig.Advanced.MachineGlobalConfig.Data["flannel-ipv6-masq"] = true - logrus.Info("Provisioning K3s cluster") - s.k3sCluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, false, false) - require.NoError(s.T(), err) + logrus.Info("Provisioning K3s cluster") + s.cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, false, false) + require.NoError(s.T(), err) + } else { + logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName) + s.cluster, err = client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + } } func snapshotRestoreConfigs() []*etcdsnapshot.Config { @@ -123,22 +127,20 @@ func (s *SnapshotIPv6RestoreTestSuite) TestSnapshotIPv6Restore() { tests := []struct { name string etcdSnapshot *etcdsnapshot.Config - clusterID string + cluster *v1.SteveAPIObject }{ - {"RKE2_IPv6_Restore_ETCD", snapshotRestoreConfigRKE2[0], s.rke2Cluster.ID}, - {"RKE2_IPv6_Restore_ETCD_K8sVersion", snapshotRestoreConfigRKE2[1], s.rke2Cluster.ID}, - {"RKE2_IPv6_Restore_Upgrade_Strategy", snapshotRestoreConfigRKE2[2], s.rke2Cluster.ID}, - {"K3S_IPv6_Restore_ETCD", snapshotRestoreConfigRKE2[0], s.k3sCluster.ID}, - {"K3S_IPv6_Restore_ETCD_K8sVersion", snapshotRestoreConfigRKE2[1], s.k3sCluster.ID}, - {"K3S_IPv6_Restore_Upgrade_Strategy", snapshotRestoreConfigRKE2[2], s.k3sCluster.ID}, + 
{"K3S_IPv6_Restore_ETCD", snapshotRestoreConfigRKE2[0], s.cluster}, + {"K3S_IPv6_Restore_ETCD_K8sVersion", snapshotRestoreConfigRKE2[1], s.cluster}, + {"K3S_IPv6_Restore_Upgrade_Strategy", snapshotRestoreConfigRKE2[2], s.cluster}, } for _, tt := range tests { - cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.clusterID) - require.NoError(s.T(), err) - + var err error s.Run(tt.name, func() { - err := etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, containerImage) + cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.cluster.ID) + require.NoError(s.T(), err) + + err = etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, containerImage) require.NoError(s.T(), err) }) diff --git a/validation/snapshot/ipv6/schemas/hostbusters_schemas.yaml b/validation/snapshot/k3s/schemas/hostbusters_schemas.yaml similarity index 55% rename from validation/snapshot/ipv6/schemas/hostbusters_schemas.yaml rename to validation/snapshot/k3s/schemas/hostbusters_schemas.yaml index e69673011..688934629 100644 --- a/validation/snapshot/ipv6/schemas/hostbusters_schemas.yaml +++ b/validation/snapshot/k3s/schemas/hostbusters_schemas.yaml @@ -1,8 +1,8 @@ -- suite: Go Automation/Snapshot +- suite: Go Automation/Snapshot/k3s projects: [RRT, RM] cases: - description: Creates and restores a snapshot on an existing cluster - title: RKE2_Restore_ETCD + title: K3S_Restore_ETCD priority: 4 type: 8 is_flaky: 0 @@ -28,7 +28,7 @@ "18": Hostbusters - description: Upgrades the kubernetes version and restores a snapshot - title: RKE2_Restore_ETCD_K8sVersion + title: K3S_Restore_ETCD_K8sVersion priority: 4 type: 8 is_flaky: 0 @@ -54,13 +54,69 @@ data: "" position: 4 attachments: [] + custom_field: + "14": Validation + "18": Hostbusters + + - description: Creates and restores a S3 snapshot on an existing cluster + title: K3S_S3_Restore + priority: 4 + type: 8 + is_flaky: 0 + automation: 2 + steps: + - action: Create rancher provider credentials + expectedresult: "" + data: "" + position: 1 + attachments: [] + - action: Create an S3 snapshot + expectedresult: "" + data: "" + position: 2 + attachments: [] + - action: Restore to the S3 snapshot + expectedresult: "" + data: "" + position: 3 + attachments: [] + - action: Verify cluster state + expectedresult: "" + data: "" + position: 4 + attachments: [] + custom_field: + "14": Validation + "18": Hostbusters + + - description: Creates snapshots until the retention limit is reached + title: K3S_Retention_Limit + priority: 6 + type: 8 + is_flaky: 0 + automation: 2 + steps: + - action: Set the retention limit and interval + expectedresult: "" + data: "" + position: 1 + attachments: [] + - action: Wait for retention limit to be reached + expectedresult: "" + data: "" + position: 2 + attachments: [] + - action: Verify snapshot limit is respected + expectedresult: "" + data: "" + position: 3 attachments: [] custom_field: "14": Validation "18": Hostbusters - description: Creates and restores a snapshot with upgrade strategy - title: RKE2_Restore_Upgrade_Strategy + title: K3S_Restore_Upgrade_Strategy priority: 5 type: 8 is_flaky: 0 @@ -85,9 +141,9 @@ "14": Validation "18": Hostbusters - - description: Creates and restores a snapshot on an existing cluster - title: K3S_Restore_ETCD - priority: 4 + - description: Replaces all control plane nodes on a cluster and restores a snapshot + title: K3S_Replace_Control_Plane_Nodes + priority: 5 type: 8 is_flaky: 0 automation: 2 @@ -97,23 +153,28 
@@ data: "" position: 1 attachments: [] - - action: Restore to the snapshot + - action: Replace control plane nodes expectedresult: "" data: "" position: 2 attachments: [] - - action: Verify cluster state + - action: Restore the snapshot expectedresult: "" data: "" position: 3 attachments: [] + - action: Verify cluster state + expectedresult: "" + data: "" + position: 4 + attachments: [] custom_field: "14": Validation "18": Hostbusters - - description: Upgrades the kubernetes version and restores a snapshot - title: K3S_Restore_ETCD_K8sVersion - priority: 4 + - description: Replaces all etcd nodes on a cluster and restores a snapshot + title: K3S_Replace_ETCD_Nodes + priority: 5 type: 8 is_flaky: 0 automation: 2 @@ -123,12 +184,12 @@ data: "" position: 1 attachments: [] - - action: Upgrade the kubernetes version + - action: Replace etcd nodes expectedresult: "" data: "" position: 2 attachments: [] - - action: Restore to the snapshot + - action: Restore the snapshot expectedresult: "" data: "" position: 3 @@ -138,18 +199,48 @@ data: "" position: 4 attachments: [] - attachments: [] custom_field: "14": Validation "18": Hostbusters - - description: Creates and restores a snapshot with upgrade strategy - title: K3S_Restore_Upgrade_Strategy + - description: Replaces all worker nodes on a cluster and restores a snapshot + title: K3S_Replace_Worker_Nodes priority: 5 type: 8 is_flaky: 0 automation: 2 steps: + - action: Create a snapshot + expectedresult: "" + data: "" + position: 1 + attachments: [] + - action: Replace worker nodes + expectedresult: "" + data: "" + position: 2 + attachments: [] + - action: Restore the snapshot + expectedresult: "" + data: "" + position: 3 + attachments: [] + - action: Verify cluster state + expectedresult: "" + data: "" + position: 4 + attachments: [] + custom_field: + "14": Validation + "18": Hostbusters + + - description: Creates and restores a series of snapshots + title: K3S_Recurring_Restores + priority: 6 + type: 8 + is_flaky: 0 + automation: 2 + steps: - action: Create a snapshot expectedresult: "" data: "" @@ -165,6 +256,11 @@ data: "" position: 3 attachments: [] + - action: Repeat steps 1-3 + expectedresult: "" + data: "" + position: 4 + attachments: [] custom_field: "14": Validation "18": Hostbusters \ No newline at end of file diff --git a/validation/snapshot/rke2k3s/snapshot_recurring_test.go b/validation/snapshot/k3s/snapshot_recurring_test.go similarity index 74% rename from validation/snapshot/rke2k3s/snapshot_recurring_test.go rename to validation/snapshot/k3s/snapshot_recurring_test.go index 2d4886712..53799a06f 100644 --- a/validation/snapshot/rke2k3s/snapshot_recurring_test.go +++ b/validation/snapshot/k3s/snapshot_recurring_test.go @@ -1,6 +1,6 @@ //go:build validation || recurring -package rke2k3s +package k3s import ( "os" @@ -31,8 +31,7 @@ type SnapshotRecurringTestSuite struct { session *session.Session client *rancher.Client cattleConfig map[string]any - rke2Cluster *v1.SteveAPIObject - k3sCluster *v1.SteveAPIObject + cluster *v1.SteveAPIObject } func (s *SnapshotRecurringTestSuite) TearDownSuite() { @@ -65,16 +64,21 @@ func (s *SnapshotRecurringTestSuite) SetupSuite() { clusterConfig := new(clusters.ClusterConfig) operations.LoadObjectFromMap(defaults.ClusterConfigKey, s.cattleConfig, clusterConfig) + rancherConfig := new(rancher.Config) + operations.LoadObjectFromMap(defaults.RancherConfigKey, s.cattleConfig, rancherConfig) + provider := provisioning.CreateProvider(clusterConfig.Provider) machineConfigSpec := 
provider.LoadMachineConfigFunc(s.cattleConfig) - logrus.Info("Provisioning RKE2 cluster") - s.rke2Cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false) - require.NoError(s.T(), err) - - logrus.Info("Provisioning K3S cluster") - s.k3sCluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false) - require.NoError(s.T(), err) + if rancherConfig.ClusterName == "" { + logrus.Info("Provisioning K3S cluster") + s.cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, false, false) + require.NoError(s.T(), err) + } else { + logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName) + s.cluster, err = client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + } } func (s *SnapshotRecurringTestSuite) TestSnapshotRecurringRestores() { @@ -87,18 +91,18 @@ func (s *SnapshotRecurringTestSuite) TestSnapshotRecurringRestores() { tests := []struct { name string etcdSnapshot *etcdsnapshot.Config - clusterID string + cluster *v1.SteveAPIObject }{ - {"RKE2_Recurring_Restores", snapshotRestoreFiveTimes, s.rke2Cluster.ID}, - {"K3S_Recurring_Restores", snapshotRestoreFiveTimes, s.k3sCluster.ID}, + {"K3S_Recurring_Restores", snapshotRestoreFiveTimes, s.cluster}, } for _, tt := range tests { - cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.clusterID) - require.NoError(s.T(), err) - + var err error s.Run(tt.name, func() { - err := etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, containerImage) + cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.cluster.ID) + require.NoError(s.T(), err) + + err = etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, containerImage) require.NoError(s.T(), err) }) diff --git a/validation/snapshot/rke2k3s/snapshot_restore_test.go b/validation/snapshot/k3s/snapshot_restore_test.go similarity index 71% rename from validation/snapshot/rke2k3s/snapshot_restore_test.go rename to validation/snapshot/k3s/snapshot_restore_test.go index ea75f615d..578bf97a0 100644 --- a/validation/snapshot/rke2k3s/snapshot_restore_test.go +++ b/validation/snapshot/k3s/snapshot_restore_test.go @@ -1,6 +1,6 @@ //go:build (validation || recurring || extended || infra.any || cluster.any) && !sanity && !stress -package rke2k3s +package k3s import ( "os" @@ -36,8 +36,7 @@ type SnapshotRestoreTestSuite struct { session *session.Session client *rancher.Client cattleConfig map[string]any - rke2Cluster *v1.SteveAPIObject - k3sCluster *v1.SteveAPIObject + cluster *v1.SteveAPIObject } func (s *SnapshotRestoreTestSuite) TearDownSuite() { @@ -70,16 +69,21 @@ func (s *SnapshotRestoreTestSuite) SetupSuite() { clusterConfig := new(clusters.ClusterConfig) operations.LoadObjectFromMap(defaults.ClusterConfigKey, s.cattleConfig, clusterConfig) + rancherConfig := new(rancher.Config) + operations.LoadObjectFromMap(defaults.RancherConfigKey, s.cattleConfig, rancherConfig) + provider := provisioning.CreateProvider(clusterConfig.Provider) machineConfigSpec := provider.LoadMachineConfigFunc(s.cattleConfig) - logrus.Info("Provisioning RKE2 cluster") - s.rke2Cluster, err = 
resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, false, false) - require.NoError(s.T(), err) - - logrus.Info("Provisioning K3S cluster") - s.k3sCluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, false, false) - require.NoError(s.T(), err) + if rancherConfig.ClusterName == "" { + logrus.Info("Provisioning K3S cluster") + s.cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, false, false) + require.NoError(s.T(), err) + } else { + logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName) + s.cluster, err = client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + } } func snapshotRestoreConfigs() []*etcdsnapshot.Config { @@ -105,27 +109,24 @@ func snapshotRestoreConfigs() []*etcdsnapshot.Config { } func (s *SnapshotRestoreTestSuite) TestSnapshotRestore() { - snapshotRestoreConfigRKE2 := snapshotRestoreConfigs() - snapshotRestoreConfigK3s := snapshotRestoreConfigs() + snapshotRestoreConfig := snapshotRestoreConfigs() tests := []struct { name string etcdSnapshot *etcdsnapshot.Config - clusterID string + cluster *v1.SteveAPIObject }{ - {"RKE2_Restore_ETCD", snapshotRestoreConfigRKE2[0], s.rke2Cluster.ID}, - {"RKE2_Restore_ETCD_K8sVersion", snapshotRestoreConfigRKE2[1], s.rke2Cluster.ID}, - {"RKE2_Restore_Upgrade_Strategy", snapshotRestoreConfigRKE2[2], s.rke2Cluster.ID}, - {"K3S_Restore_ETCD", snapshotRestoreConfigK3s[0], s.k3sCluster.ID}, - {"K3S_Restore_ETCD_K8sVersion", snapshotRestoreConfigK3s[1], s.k3sCluster.ID}, - {"K3S_Restore_Upgrade_Strategy", snapshotRestoreConfigK3s[2], s.k3sCluster.ID}, + {"K3S_Restore_ETCD", snapshotRestoreConfig[0], s.cluster}, + {"K3S_Restore_ETCD_K8sVersion", snapshotRestoreConfig[1], s.cluster}, + {"K3S_Restore_Upgrade_Strategy", snapshotRestoreConfig[2], s.cluster}, } for _, tt := range tests { - cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.clusterID) - require.NoError(s.T(), err) - + var err error s.Run(tt.name, func() { - err := etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, containerImage) + cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.cluster.ID) + require.NoError(s.T(), err) + + err = etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, containerImage) require.NoError(s.T(), err) }) diff --git a/validation/snapshot/k3s/snapshot_retention_test.go b/validation/snapshot/k3s/snapshot_retention_test.go new file mode 100644 index 000000000..954e4b2c9 --- /dev/null +++ b/validation/snapshot/k3s/snapshot_retention_test.go @@ -0,0 +1,124 @@ +//go:build (validation || extended || infra.any || cluster.any) && !sanity && !stress + +package k3s + +import ( + "fmt" + "os" + "testing" + + "github.com/rancher/shepherd/clients/rancher" + v1 "github.com/rancher/shepherd/clients/rancher/v1" + extClusters "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/extensions/defaults/namespaces" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/pkg/config" + "github.com/rancher/shepherd/pkg/config/operations" + "github.com/rancher/shepherd/pkg/session" + 
"github.com/rancher/tests/actions/clusters" + "github.com/rancher/tests/actions/config/defaults" + "github.com/rancher/tests/actions/etcdsnapshot" + "github.com/rancher/tests/actions/logging" + "github.com/rancher/tests/actions/provisioning" + resources "github.com/rancher/tests/validation/provisioning/resources/provisioncluster" + standard "github.com/rancher/tests/validation/provisioning/resources/standarduser" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type SnapshotRetentionTestSuite struct { + suite.Suite + session *session.Session + client *rancher.Client + cattleConfig map[string]any + cluster *v1.SteveAPIObject +} + +type SnapshotRetentionConfig struct { + ClusterName string `json:"clusterName" yaml:"clusterName"` + SnapshotInterval int `json:"snapshotInterval" yaml:"snapshotInterval"` + SnapshotRetention int `json:"snapshotRetention" yaml:"snapshotRetention"` +} + +func (s *SnapshotRetentionTestSuite) TearDownSuite() { + s.session.Cleanup() +} + +func (s *SnapshotRetentionTestSuite) SetupSuite() { + testSession := session.NewSession() + s.session = testSession + + client, err := rancher.NewClient("", s.session) + require.NoError(s.T(), err) + + s.client = client + + standardUserClient, _, _, err := standard.CreateStandardUser(s.client) + require.NoError(s.T(), err) + + s.cattleConfig = config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey)) + + s.cattleConfig, err = defaults.LoadPackageDefaults(s.cattleConfig, "") + require.NoError(s.T(), err) + + loggingConfig := new(logging.Logging) + operations.LoadObjectFromMap(logging.LoggingKey, s.cattleConfig, loggingConfig) + + err = logging.SetLogger(loggingConfig) + require.NoError(s.T(), err) + + clusterConfig := new(clusters.ClusterConfig) + operations.LoadObjectFromMap(defaults.ClusterConfigKey, s.cattleConfig, clusterConfig) + + rancherConfig := new(rancher.Config) + operations.LoadObjectFromMap(defaults.RancherConfigKey, s.cattleConfig, rancherConfig) + + provider := provisioning.CreateProvider(clusterConfig.Provider) + machineConfigSpec := provider.LoadMachineConfigFunc(s.cattleConfig) + + if rancherConfig.ClusterName == "" { + logrus.Info("Provisioning K3S cluster") + s.cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, false, false) + require.NoError(s.T(), err) + } else { + logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName) + s.cluster, err = client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + } +} + +func (s *SnapshotRetentionTestSuite) TestAutomaticSnapshotRetention() { + tests := []struct { + testName string + cluster *v1.SteveAPIObject + retentionLimit int + intervalBetweenSnapshots int + }{ + {"K3S_Retention_Limit", s.cluster, 2, 1}, + } + + for _, tt := range tests { + s.Run(tt.testName, func() { + cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.cluster.ID) + require.NoError(s.T(), err) + + clusterObject, clusterResponse, err := extClusters.GetProvisioningClusterByName(s.client, cluster.Name, namespaces.FleetDefault) + require.NoError(s.T(), err) + + clusterObject.Spec.RKEConfig.ETCD.SnapshotRetention = tt.retentionLimit + cronSchedule := fmt.Sprintf("%s%v%s", "*/", tt.intervalBetweenSnapshots, " * * * *") + clusterObject.Spec.RKEConfig.ETCD.SnapshotScheduleCron = cronSchedule + + _, err = 
s.client.Steve.SteveType(stevetypes.Provisioning).Update(clusterResponse, clusterObject) + require.NoError(s.T(), err) + + err = etcdsnapshot.CreateSnapshotsUntilRetentionLimit(s.client, cluster.Name, tt.retentionLimit, tt.intervalBetweenSnapshots) + require.NoError(s.T(), err) + }) + } +} + +func TestSnapshotRetentionTestSuite(t *testing.T) { + suite.Run(t, new(SnapshotRetentionTestSuite)) +} diff --git a/validation/snapshot/rke2k3s/snapshot_s3_restore_test.go b/validation/snapshot/k3s/snapshot_s3_restore_test.go similarity index 74% rename from validation/snapshot/rke2k3s/snapshot_s3_restore_test.go rename to validation/snapshot/k3s/snapshot_s3_restore_test.go index af02d26a2..1170164be 100644 --- a/validation/snapshot/rke2k3s/snapshot_s3_restore_test.go +++ b/validation/snapshot/k3s/snapshot_s3_restore_test.go @@ -1,6 +1,6 @@ //go:build (validation || extended || infra.any || cluster.any) && !sanity && !stress -package rke2k3s +package k3s import ( "os" @@ -32,8 +32,7 @@ type S3SnapshotRestoreTestSuite struct { session *session.Session client *rancher.Client cattleConfig map[string]any - rke2Cluster *v1.SteveAPIObject - k3sCluster *v1.SteveAPIObject + cluster *v1.SteveAPIObject } func (s *S3SnapshotRestoreTestSuite) TearDownSuite() { @@ -66,16 +65,21 @@ func (s *S3SnapshotRestoreTestSuite) SetupSuite() { clusterConfig := new(clusters.ClusterConfig) operations.LoadObjectFromMap(defaults.ClusterConfigKey, s.cattleConfig, clusterConfig) + rancherConfig := new(rancher.Config) + operations.LoadObjectFromMap(defaults.RancherConfigKey, s.cattleConfig, rancherConfig) + provider := provisioning.CreateProvider(clusterConfig.Provider) machineConfigSpec := provider.LoadMachineConfigFunc(s.cattleConfig) - logrus.Info("Provisioning RKE2 cluster") - s.rke2Cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false) - require.NoError(s.T(), err) - - logrus.Info("Provisioning K3S cluster") - s.k3sCluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false) - require.NoError(s.T(), err) + if rancherConfig.ClusterName == "" { + logrus.Info("Provisioning K3S cluster") + s.cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, false, false) + require.NoError(s.T(), err) + } else { + logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName) + s.cluster, err = client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + } } func (s *S3SnapshotRestoreTestSuite) TestS3SnapshotRestore() { @@ -88,18 +92,18 @@ func (s *S3SnapshotRestoreTestSuite) TestS3SnapshotRestore() { tests := []struct { name string etcdSnapshot *etcdsnapshot.Config - clusterID string + cluster *v1.SteveAPIObject }{ - {"RKE2_S3_Restore", snapshotRestoreNone, s.rke2Cluster.ID}, - {"K3S_S3_Restore", snapshotRestoreNone, s.k3sCluster.ID}, + {"K3S_S3_Restore", snapshotRestoreNone, s.cluster}, } for _, tt := range tests { - cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.clusterID) - require.NoError(s.T(), err) - + var err error s.Run(tt.name, func() { - err := etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, containerImage) + cluster, err := 
s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.cluster.ID)
+			require.NoError(s.T(), err)
+
+			err = etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, containerImage)
 			require.NoError(s.T(), err)
 		})
diff --git a/validation/snapshot/rke2/README.md b/validation/snapshot/rke2/README.md
new file mode 100644
index 000000000..7d84c7795
--- /dev/null
+++ b/validation/snapshot/rke2/README.md
@@ -0,0 +1,212 @@
+# RKE2 Snapshot Configs
+
+## Table of Contents
+1. [Prerequisites](../README.md)
+2. [Test Cases](#Test-Cases)
+3. [Configurations](#Configurations)
+4. [Configuration Defaults](#defaults)
+5. [Logging Levels](#Logging)
+6. [Back to general snapshot](../README.md)
+
+## Test Cases
+All of the test cases in this package are listed below. Keep in mind that all configurations for these tests have built-in defaults; see [Configuration Defaults](#defaults). These tests will provision a cluster if one is not provided via the `rancher.clusterName` field.
+
+### Recurring Snapshot Test
+
+#### Description:
+The recurring snapshot test verifies that a cluster can create and restore a series of snapshots. None of these configurations are required if an already provisioned cluster is provided to the test.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `RKE2_Recurring_Restores`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/rke2 --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestSnapshotRecurringTestSuite/TestSnapshotRecurringRestores -timeout=1h -v`
+
+### Snapshot Restore Test
+
+#### Description:
+The snapshot restore test validates that snapshots can be created and restored without any failures or long-term disruption to workloads.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `RKE2_Restore_ETCD`
+2. `RKE2_Restore_ETCD_K8sVersion`
+3. `RKE2_Restore_Upgrade_Strategy`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/rke2 --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestSnapshotRestoreTestSuite/TestSnapshotRestore -timeout=1h -v`
+
+### Snapshot Retention Test
+
+#### Description:
+The snapshot retention test validates that the configured number of snapshots is retained and that older snapshots are deleted as expected.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `RKE2_Retention_Limit`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/rke2 --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestSnapshotRetentionTestSuite/TestAutomaticSnapshotRetention -timeout=1h -v`
+
+### Snapshot Windows Test
+
+#### Description:
+The snapshot Windows test verifies that snapshots can be created and restored on a cluster containing Windows nodes.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `RKE2_Windows_Restore`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/rke2 --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestSnapshotRestoreWindowsTestSuite/TestSnapshotRestoreWindows -timeout=1h -v`
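+
+The Windows restore test drives a custom cluster on EC2. The `awsEC2Configs` block below is carried over from the previous top-level snapshot README; all empty values are placeholders, and the pool with `roles: ["windows"]` is what adds the Windows node:
+
+```yaml
+awsEC2Configs:
+  region: "us-east-2"
+  awsSecretAccessKey: ""
+  awsAccessKeyID: ""
+  awsEC2Config:
+    - instanceType: ""
+      awsRegionAZ: ""
+      awsAMI: ""
+      awsSecurityGroups: [""]
+      awsSSHKeyName: ""
+      awsCICDInstanceTag: ""
+      awsIAMProfile: ""
+      awsUser: ""
+      volumeSize:
+      roles: ["etcd", "controlplane", "worker"]
+    - instanceType: ""
+      awsRegionAZ: ""
+      awsAMI: ""
+      awsSecurityGroups: [""]
+      awsSSHKeyName: ""
+      awsCICDInstanceTag: ""
+      awsUser: "Administrator"
+      volumeSize:
+      roles: ["windows"]
+sshPath:
+  sshPath: "/"
+```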
+
+### Snapshot S3 Test
+
+#### Description:
+The snapshot S3 test validates that snapshots can be stored in and restored from an S3 bucket.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+4. S3 configuration in the etcd section of the cluster config
+
+#### Table Tests:
+1. `RKE2_S3_Restore`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/rke2 --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestS3SnapshotRestoreTestSuite/TestS3SnapshotRestore -timeout=1h -v`
+
+### Dualstack Snapshot Restore Test
+
+#### Description:
+The dualstack snapshot restore test validates that a cluster configured for dualstack networking can create and restore snapshots successfully.
+
+#### Required Configurations:
+1. [Cloud Credential](#cloud-credential-config)
+2. [Cluster Config](#cluster-config)
+3. [Machine Config](#machine-config)
+
+#### Table Tests:
+1. `RKE2_Dualstack_Restore_ETCD`
+2. `RKE2_Dualstack_Restore_ETCD_K8sVersion`
+3. `RKE2_Dualstack_Restore_Upgrade_Strategy`
+
+#### Run Commands:
+1. `gotestsum --format standard-verbose --packages=github.com/rancher/tests/validation/snapshot/rke2/dualstack --junitfile results.xml --jsonfile results.json -- -tags=validation -run TestSnapshotDualstackRestoreTestSuite/TestSnapshotDualstackRestore -timeout=1h -v`
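+
+The dualstack defaults shipped with this package ([defaults](dualstack/defaults/defaults.yaml)) leave the networking fields empty. The keys below come from that defaults file; the CIDR pairs and stack preference are hypothetical example values, not required settings:
+
+```yaml
+clusterConfig:
+  networking:
+    clusterCIDR: "10.42.0.0/16,fd42::/48"     # hypothetical IPv4,IPv6 pair
+    serviceCIDR: "10.43.0.0/16,fd43::/112"    # hypothetical IPv4,IPv6 pair
+    stackPreference: "ipv4"                   # hypothetical preference
+```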
+
+## Logging
+This package supports several logging levels. You can set the logging level via the cattle config; all levels at or above the provided level will be logged, while all logs below that level will be omitted.
+
+```yaml
+logging:
+  level: "trace" #trace, debug, info, warning, error
+```
+
+## Additional
+1. If the tests pass immediately without warning, try adding `-count=1` or running `go clean -cache`. This will prevent previous results from interfering with the new test run.
+2. All of the tests utilize parallelism when running; for more fine-grained control of how things are run in parallel, use the `-p` and `-parallel` flags.
\ No newline at end of file
diff --git a/validation/snapshot/rke2/defaults/defaults.yaml b/validation/snapshot/rke2/defaults/defaults.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/validation/snapshot/rke2/dualstack/defaults/defaults.yaml b/validation/snapshot/rke2/dualstack/defaults/defaults.yaml
new file mode 100644
index 000000000..6b75d274e
--- /dev/null
+++ b/validation/snapshot/rke2/dualstack/defaults/defaults.yaml
@@ -0,0 +1,13 @@
+#Required for all dualstack/ipv6 tests
+clusterConfig:
+  networking:
+    clusterCIDR: ""
+    serviceCIDR: ""
+    stackPreference: ""
+
+awsMachineConfigs:
+  awsMachineConfig:
+  - enablePrimaryIPv6: true
+    httpProtocolIpv6: "enabled"
+    ipv6AddressOnly: true
+    ipv6AddressCount: "1"
\ No newline at end of file
diff --git a/validation/snapshot/dualstack/schemas/hostbusters_schemas.yaml b/validation/snapshot/rke2/dualstack/schemas/hostbusters_schemas.yaml
similarity index 50%
rename from validation/snapshot/dualstack/schemas/hostbusters_schemas.yaml
rename to validation/snapshot/rke2/dualstack/schemas/hostbusters_schemas.yaml
index 2a0ff1b58..5ef22a906 100644
--- a/validation/snapshot/dualstack/schemas/hostbusters_schemas.yaml
+++ b/validation/snapshot/rke2/dualstack/schemas/hostbusters_schemas.yaml
@@ -1,4 +1,4 @@
-- suite: Go Automation/Snapshot
+- suite: Go Automation/Snapshot/rke2/dualstack
   projects: [RRT, RM]
   cases:
     - description: Creates and restores a snapshot on an existing cluster
@@ -66,90 +66,6 @@
       is_flaky: 0
       automation: 2
       steps:
-        - action: Create a snapshot
-          expectedresult: ""
-          data: ""
-          position: 1
-          attachments: []
-        - action: Restore the snapshot
-          expectedresult: ""
-          data: ""
-          position: 2
-          attachments: []
-        - action: Verify cluster state
-          expectedresult: ""
-          data: ""
-          position: 3
-          attachments: []
-      custom_field:
-        "14": Validation
-        "18": Hostbusters
-
-    - description: Creates and restores a snapshot on an existing cluster
-      title: K3S_Dualstack_Restore_ETCD
-      priority: 4
-      type: 8
-      is_flaky: 0
-      automation: 2
-      steps:
-        - action: Create a snapshot
-          expectedresult: ""
-          data: ""
-          position: 1
-          attachments: []
-        - action: Restore to the snapshot
-          expectedresult: ""
-          data: ""
-          position: 2
-          attachments: []
-        - action: Verify cluster state
-          expectedresult: ""
-          data: ""
-          position: 3
-          attachments: []
-      custom_field:
-        "14": Validation
-        "18": Hostbusters
-
-    - description: Upgrades the kubernetes version and restores a snapshot
-      title: K3S_Dualstack_Restore_ETCD_K8sVersion
-      priority: 4
-      type: 8
-      is_flaky: 0
-      automation: 2
-      steps:
-        - action: Create a snapshot
-          expectedresult: ""
-          data: ""
-          position: 1
-          attachments: []
-        - action: Upgrade the kubernetes version
-          expectedresult: ""
-          data: ""
-          position: 2
-          attachments: []
-        - action: Restore to the snapshot
-          expectedresult: ""
-          data: ""
-          position: 3
-          attachments: []
-        - action: Verify cluster state
-          expectedresult: ""
-          data: ""
-          position: 4
-          attachments: []
-      attachments: []
-      custom_field:
-        "14": Validation
-        "18": Hostbusters
-
-    - description: Creates and restores a snapshot with upgrade strategy
-      title: K3S_Dualstack_Restore_Upgrade_Strategy
-      priority: 5
-      type: 8
-      is_flaky: 0
-      automation: 2
-      steps:
         - action: Create a snapshot
           expectedresult: ""
           data: ""
diff --git a/validation/snapshot/dualstack/snapshot_restore_test.go b/validation/snapshot/rke2/dualstack/snapshot_restore_test.go
similarity index 72%
rename from validation/snapshot/dualstack/snapshot_restore_test.go
rename to validation/snapshot/rke2/dualstack/snapshot_restore_test.go
index e4bf204ee..ad356fce5 100644
--- a/validation/snapshot/dualstack/snapshot_restore_test.go
+++ b/validation/snapshot/rke2/dualstack/snapshot_restore_test.go
@@ -35,8 +35,7 @@ type SnapshotDualstackRestoreTestSuite struct {
     session      *session.Session
     client       *rancher.Client
     cattleConfig map[string]any
-    rke2Cluster  *v1.SteveAPIObject
-    k3sCluster   *v1.SteveAPIObject
+    cluster      *v1.SteveAPIObject
 }
 
 func (s *SnapshotDualstackRestoreTestSuite) TearDownSuite() {
@@ -69,16 +68,21 @@ func (s *SnapshotDualstackRestoreTestSuite) SetupSuite() {
     clusterConfig := new(clusters.ClusterConfig)
     operations.LoadObjectFromMap(defaults.ClusterConfigKey, s.cattleConfig, clusterConfig)
 
+    rancherConfig := new(rancher.Config)
+    operations.LoadObjectFromMap(defaults.RancherConfigKey, s.cattleConfig, rancherConfig)
+
     provider := provisioning.CreateProvider(clusterConfig.Provider)
     machineConfigSpec := provider.LoadMachineConfigFunc(s.cattleConfig)
 
-    logrus.Info("Provisioning RKE2 cluster")
-    s.rke2Cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, false, false)
-    require.NoError(s.T(), err)
-
-    logrus.Info("Provisioning K3S cluster")
-    s.k3sCluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, false, false)
-    require.NoError(s.T(), err)
+    if rancherConfig.ClusterName == "" {
+        logrus.Info("Provisioning RKE2 cluster")
+        s.cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false)
+        require.NoError(s.T(), err)
+    } else {
+        logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName)
+        s.cluster, err = client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + s.client.RancherConfig.ClusterName)
+        require.NoError(s.T(), err)
+    }
 }
 
 func snapshotRestoreConfigs() []*etcdsnapshot.Config {
@@ -104,27 +108,24 @@ func snapshotRestoreConfigs() []*etcdsnapshot.Config {
 }
 
 func (s *SnapshotDualstackRestoreTestSuite) TestSnapshotDualstackRestore() {
-    snapshotRestoreConfigRKE2 := snapshotRestoreConfigs()
-    snapshotRestoreConfigK3s := snapshotRestoreConfigs()
+    snapshotRestoreConfig := snapshotRestoreConfigs()
 
     tests := []struct {
         name         string
         etcdSnapshot *etcdsnapshot.Config
-        clusterID    string
+        cluster      *v1.SteveAPIObject
     }{
-        {"RKE2_Dualstack_Restore_ETCD", snapshotRestoreConfigRKE2[0], s.rke2Cluster.ID},
-        {"RKE2_Dualstack_Restore_ETCD_K8sVersion", snapshotRestoreConfigRKE2[1], s.rke2Cluster.ID},
-        {"RKE2_Dualstack_Restore_Upgrade_Strategy", snapshotRestoreConfigRKE2[2], s.rke2Cluster.ID},
-        {"K3S_Dualstack_Restore_ETCD", snapshotRestoreConfigK3s[0], s.k3sCluster.ID},
-        {"K3S_Dualstack_Restore_ETCD_K8sVersion", snapshotRestoreConfigK3s[1], s.k3sCluster.ID},
-        {"K3S_Dualstack_Restore_Upgrade_Strategy", snapshotRestoreConfigK3s[2], s.k3sCluster.ID},
+        {"RKE2_Dualstack_Restore_ETCD", snapshotRestoreConfig[0], s.cluster},
+        {"RKE2_Dualstack_Restore_ETCD_K8sVersion", snapshotRestoreConfig[1], s.cluster},
+        {"RKE2_Dualstack_Restore_Upgrade_Strategy", snapshotRestoreConfig[2], s.cluster},
     }
 
     for _, tt := range tests {
-        cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.clusterID)
-        require.NoError(s.T(), err)
-
+        var err error
         s.Run(tt.name, func() {
-            err := etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, containerImage)
+            cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.cluster.ID)
+            require.NoError(s.T(), err)
+
+            err = etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, containerImage)
             require.NoError(s.T(), err)
         })
diff --git a/validation/snapshot/rke2/ipv6/defaults/defaults.yaml b/validation/snapshot/rke2/ipv6/defaults/defaults.yaml
new file mode 100644
index 000000000..6b75d274e
--- /dev/null
+++ b/validation/snapshot/rke2/ipv6/defaults/defaults.yaml
@@ -0,0 +1,13 @@
+#Required for all dualstack/ipv6 tests
+clusterConfig:
+  networking:
+    clusterCIDR: ""
+    serviceCIDR: ""
+    stackPreference: ""
+
+awsMachineConfigs:
+  awsMachineConfig:
+  - enablePrimaryIPv6: true
+    httpProtocolIpv6: "enabled"
+    ipv6AddressOnly: true
+    ipv6AddressCount: "1"
\ No newline at end of file
diff --git a/validation/snapshot/rke2/ipv6/schemas/hostbusters_schemas.yaml b/validation/snapshot/rke2/ipv6/schemas/hostbusters_schemas.yaml
new file mode 100644
index 000000000..11a4dd4c5
--- /dev/null
+++ b/validation/snapshot/rke2/ipv6/schemas/hostbusters_schemas.yaml
@@ -0,0 +1,86 @@
+- suite: Go Automation/Snapshot/rke2/ipv6
+  projects: [RRT, RM]
+  cases:
+    - description: Creates and restores a snapshot on an existing cluster
+      title: RKE2_IPv6_Restore_ETCD
+      priority: 4
+      type: 8
+      is_flaky: 0
+      automation: 2
+      steps:
+        - action: Create a snapshot
+          expectedresult: ""
+          data: ""
+          position: 1
+          attachments: []
+        - action: Restore to the snapshot
+          expectedresult: ""
+          data: ""
+          position: 2
+          attachments: []
+        - action: Verify cluster state
+          expectedresult: ""
+          data: ""
+          position: 3
+          attachments: []
+      custom_field:
+        "14": Validation
+        "18": Hostbusters
+
+    - description: Upgrades the kubernetes version and restores a snapshot
+      title: RKE2_IPv6_Restore_ETCD_K8sVersion
+      priority: 4
+      type: 8
+      is_flaky: 0
+      automation: 2
+      steps:
+        - action: Create a snapshot
+          expectedresult: ""
+          data: ""
+          position: 1
+          attachments: []
+        - action: Upgrade the kubernetes version
+          expectedresult: ""
+          data: ""
+          position: 2
+          attachments: []
+        - action: Restore to the snapshot
+          expectedresult: ""
+          data: ""
+          position: 3
+          attachments: []
+        - action: Verify cluster state
+          expectedresult: ""
+          data: ""
+          position: 4
+          attachments: []
+      attachments: []
+      custom_field:
+        "14": Validation
+        "18": Hostbusters
+
+    - description: Creates and restores a snapshot with upgrade strategy
+      title: RKE2_IPv6_Restore_Upgrade_Strategy
+      priority: 5
+      type: 8
+      is_flaky: 0
+      automation: 2
+      steps:
+        - action: Create a snapshot
+          expectedresult: ""
+          data: ""
+          position: 1
+          attachments: []
+        - action: Restore the snapshot
+          expectedresult: ""
+          data: ""
+          position: 2
+          attachments: []
+        - action: Verify cluster state
+          expectedresult: ""
+          data: ""
+          position: 3
+          attachments: []
+      custom_field:
+        "14": Validation
+        "18": Hostbusters
\ No newline at end of file
diff --git a/validation/snapshot/rke2/ipv6/snapshot_restore_test.go b/validation/snapshot/rke2/ipv6/snapshot_restore_test.go
new file mode 100644
index 000000000..875b11949
--- /dev/null
+++ b/validation/snapshot/rke2/ipv6/snapshot_restore_test.go
@@ -0,0 +1,144 @@
+//go:build validation || recurring
+
+package ipv6
+
+import (
+    "os"
+    "testing"
+
+    "github.com/rancher/shepherd/clients/rancher"
+    v1 "github.com/rancher/shepherd/clients/rancher/v1"
+    extClusters "github.com/rancher/shepherd/extensions/clusters"
+    "github.com/rancher/shepherd/extensions/defaults/stevetypes"
+    "github.com/rancher/shepherd/pkg/config"
+    "github.com/rancher/shepherd/pkg/config/operations"
+    "github.com/rancher/shepherd/pkg/session"
+    "github.com/rancher/tests/actions/clusters"
+    "github.com/rancher/tests/actions/config/defaults"
+    "github.com/rancher/tests/actions/etcdsnapshot"
+    "github.com/rancher/tests/actions/logging"
+    "github.com/rancher/tests/actions/provisioning"
+    "github.com/rancher/tests/actions/qase"
+    resources "github.com/rancher/tests/validation/provisioning/resources/provisioncluster"
+    standard "github.com/rancher/tests/validation/provisioning/resources/standarduser"
+    "github.com/sirupsen/logrus"
+    "github.com/stretchr/testify/require"
+    "github.com/stretchr/testify/suite"
+)
+
+const (
+    containerImage = "nginx"
+)
+
+type SnapshotIPv6RestoreTestSuite struct {
+    suite.Suite
+    session      *session.Session
+    client       *rancher.Client
+    cattleConfig map[string]any
+    cluster      *v1.SteveAPIObject
+}
+
+func (s *SnapshotIPv6RestoreTestSuite) TearDownSuite() {
+    s.session.Cleanup()
+}
+
+func (s *SnapshotIPv6RestoreTestSuite) SetupSuite() {
+    testSession := session.NewSession()
+    s.session = testSession
+
+    client, err := rancher.NewClient("", s.session)
+    require.NoError(s.T(), err)
+
+    s.client = client
+
+    standardUserClient, _, _, err := standard.CreateStandardUser(s.client)
+    require.NoError(s.T(), err)
+
+    s.cattleConfig = config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey))
+
+    s.cattleConfig, err = defaults.LoadPackageDefaults(s.cattleConfig, "")
+    require.NoError(s.T(), err)
+
+    loggingConfig := new(logging.Logging)
+    operations.LoadObjectFromMap(logging.LoggingKey, s.cattleConfig, loggingConfig)
+
+    err = logging.SetLogger(loggingConfig)
+    require.NoError(s.T(), err)
+
+    clusterConfig := new(clusters.ClusterConfig)
+    operations.LoadObjectFromMap(defaults.ClusterConfigKey, s.cattleConfig, clusterConfig)
+
+    rancherConfig := new(rancher.Config)
+    operations.LoadObjectFromMap(defaults.RancherConfigKey, s.cattleConfig, rancherConfig)
+
+    provider := provisioning.CreateProvider(clusterConfig.Provider)
+    machineConfigSpec := provider.LoadMachineConfigFunc(s.cattleConfig)
+
+    if rancherConfig.ClusterName == "" {
+        logrus.Info("Provisioning RKE2 cluster")
+        s.cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, false, false)
+        require.NoError(s.T(), err)
+    } else {
+        logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName)
+        s.cluster, err = client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + s.client.RancherConfig.ClusterName)
+        require.NoError(s.T(), err)
+    }
+}
+
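+// The three configs returned below drive the table tests: SnapshotRestore
+// "none" performs a plain etcd restore, "kubernetesVersion" also rolls the
+// Kubernetes version back, and "all" additionally restores the cluster config,
+// here with explicit control plane and worker upgrade-concurrency values.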
( + "os" + "testing" + + "github.com/rancher/shepherd/clients/rancher" + v1 "github.com/rancher/shepherd/clients/rancher/v1" + extClusters "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/pkg/config" + "github.com/rancher/shepherd/pkg/config/operations" + "github.com/rancher/shepherd/pkg/session" + "github.com/rancher/tests/actions/clusters" + "github.com/rancher/tests/actions/config/defaults" + "github.com/rancher/tests/actions/etcdsnapshot" + "github.com/rancher/tests/actions/logging" + "github.com/rancher/tests/actions/provisioning" + "github.com/rancher/tests/actions/qase" + resources "github.com/rancher/tests/validation/provisioning/resources/provisioncluster" + standard "github.com/rancher/tests/validation/provisioning/resources/standarduser" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +const ( + containerImage = "nginx" +) + +type SnapshotIPv6RestoreTestSuite struct { + suite.Suite + session *session.Session + client *rancher.Client + cattleConfig map[string]any + cluster *v1.SteveAPIObject +} + +func (s *SnapshotIPv6RestoreTestSuite) TearDownSuite() { + s.session.Cleanup() +} + +func (s *SnapshotIPv6RestoreTestSuite) SetupSuite() { + testSession := session.NewSession() + s.session = testSession + + client, err := rancher.NewClient("", s.session) + require.NoError(s.T(), err) + + s.client = client + + standardUserClient, _, _, err := standard.CreateStandardUser(s.client) + require.NoError(s.T(), err) + + s.cattleConfig = config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey)) + + s.cattleConfig, err = defaults.LoadPackageDefaults(s.cattleConfig, "") + require.NoError(s.T(), err) + + loggingConfig := new(logging.Logging) + operations.LoadObjectFromMap(logging.LoggingKey, s.cattleConfig, loggingConfig) + + err = logging.SetLogger(loggingConfig) + require.NoError(s.T(), err) + + clusterConfig := new(clusters.ClusterConfig) + operations.LoadObjectFromMap(defaults.ClusterConfigKey, s.cattleConfig, clusterConfig) + + rancherConfig := new(rancher.Config) + operations.LoadObjectFromMap(defaults.RancherConfigKey, s.cattleConfig, rancherConfig) + + provider := provisioning.CreateProvider(clusterConfig.Provider) + machineConfigSpec := provider.LoadMachineConfigFunc(s.cattleConfig) + + if rancherConfig.ClusterName == "" { + logrus.Info("Provisioning RKE2 cluster") + s.cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, false, false) + require.NoError(s.T(), err) + } else { + logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName) + s.cluster, err = client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + } +} + +func snapshotRestoreConfigs() []*etcdsnapshot.Config { + return []*etcdsnapshot.Config{ + { + UpgradeKubernetesVersion: "", + SnapshotRestore: "none", + RecurringRestores: 1, + }, + { + UpgradeKubernetesVersion: "", + SnapshotRestore: "kubernetesVersion", + RecurringRestores: 1, + }, + { + UpgradeKubernetesVersion: "", + SnapshotRestore: "all", + ControlPlaneConcurrencyValue: "15%", + WorkerConcurrencyValue: "20%", + RecurringRestores: 1, + }, + } +} + +func (s *SnapshotIPv6RestoreTestSuite) TestSnapshotIPv6Restore() { + snapshotRestoreConfig := snapshotRestoreConfigs() + + tests := []struct { + name string + 
etcdSnapshot *etcdsnapshot.Config + cluster *v1.SteveAPIObject + clusterType string + }{ + {"RKE2_IPv6_Restore_ETCD", snapshotRestoreConfig[0], s.cluster, defaults.RKE2}, + {"RKE2_IPv6_Restore_ETCD_K8sVersion", snapshotRestoreConfig[1], s.cluster, defaults.RKE2}, + {"RKE2_IPv6_Restore_Upgrade_Strategy", snapshotRestoreConfig[2], s.cluster, defaults.RKE2}, + } + + for _, tt := range tests { + var err error + s.Run(tt.name, func() { + cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.cluster.ID) + require.NoError(s.T(), err) + + err = etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, containerImage) + require.NoError(s.T(), err) + }) + + params := provisioning.GetProvisioningSchemaParams(s.client, s.cattleConfig) + err = qase.UpdateSchemaParameters(tt.name, params) + if err != nil { + logrus.Warningf("Failed to upload schema parameters %s", err) + } + } +} + +func TestSnapshotIPv6RestoreTestSuite(t *testing.T) { + suite.Run(t, new(SnapshotIPv6RestoreTestSuite)) +} diff --git a/validation/snapshot/rke2k3s/schemas/hostbusters_schemas.yaml b/validation/snapshot/rke2/schemas/hostbusters_schemas.yaml similarity index 52% rename from validation/snapshot/rke2k3s/schemas/hostbusters_schemas.yaml rename to validation/snapshot/rke2/schemas/hostbusters_schemas.yaml index 228f59992..32a786ada 100644 --- a/validation/snapshot/rke2k3s/schemas/hostbusters_schemas.yaml +++ b/validation/snapshot/rke2/schemas/hostbusters_schemas.yaml @@ -1,4 +1,4 @@ -- suite: Go Automation/Snapshot +- suite: Go Automation/Snapshot/rke2 projects: [RRT, RM] cases: - description: Creates and restores a snapshot on an existing cluster @@ -27,32 +27,6 @@ "14": Validation "18": Hostbusters - - description: Creates and restores a snapshot on an existing cluster - title: K3S_Restore_ETCD - priority: 4 - type: 8 - is_flaky: 0 - automation: 2 - steps: - - action: Create a snapshot - expectedresult: "" - data: "" - position: 1 - attachments: [] - - action: Restore to the snapshot - expectedresult: "" - data: "" - position: 2 - attachments: [] - - action: Verify cluster state - expectedresult: "" - data: "" - position: 3 - attachments: [] - custom_field: - "14": Validation - "18": Hostbusters - - description: Upgrades the kubernetes version and restores a snapshot title: RKE2_Restore_ETCD_K8sVersion priority: 4 @@ -85,37 +59,6 @@ "14": Validation "18": Hostbusters - - description: Upgrades the kubernetes version and restores a snapshot - title: K3S_Restore_ETCD_K8sVersion - priority: 4 - type: 8 - is_flaky: 0 - automation: 2 - steps: - - action: Create a snapshot - expectedresult: "" - data: "" - position: 1 - attachments: [] - - action: Upgrade the kubernetes version - expectedresult: "" - data: "" - position: 2 - attachments: [] - - action: Restore to the snapshot - expectedresult: "" - data: "" - position: 3 - attachments: [] - - action: Verify cluster state - expectedresult: "" - data: "" - position: 4 - attachments: [] - custom_field: - "14": Validation - "18": Hostbusters - - description: Creates and restores a S3 snapshot on an existing cluster title: RKE2_S3_Restore priority: 4 @@ -147,37 +90,6 @@ "14": Validation "18": Hostbusters - - description: Creates and restores a S3 snapshot on an existing cluster - title: K3S_S3_Restore - priority: 4 - type: 8 - is_flaky: 0 - automation: 2 - steps: - - action: Create rancher provider credentials - expectedresult: "" - data: "" - position: 1 - attachments: [] - - action: Create an S3 snapshot - expectedresult: "" - data: "" - 
position: 2 - attachments: [] - - action: Restore to the S3 snapshot - expectedresult: "" - data: "" - position: 3 - attachments: [] - - action: Verify cluster state - expectedresult: "" - data: "" - position: 4 - attachments: [] - custom_field: - "14": Validation - "18": Hostbusters - - description: Creates snapshots until the retention limit is reached title: RKE2_Retention_Limit priority: 6 @@ -209,32 +121,6 @@ "14": Validation "18": Hostbusters - - description: Creates snapshots until the retention limit is reached - title: K3S_Retention_Limit - priority: 6 - type: 8 - is_flaky: 0 - automation: 2 - steps: - - action: Set the retention limit and interval - expectedresult: "" - data: "" - position: 1 - attachments: [] - - action: Wait for retention limit to be reached - expectedresult: "" - data: "" - position: 2 - attachments: [] - - action: Verify snapshot limit is respected - expectedresult: "" - data: "" - position: 3 - attachments: [] - custom_field: - "14": Validation - "18": Hostbusters - - description: Creates and restores a snapshot on an existing windows cluster title: RKE2_Windows_Restore priority: 5 @@ -287,32 +173,6 @@ "14": Validation "18": Hostbusters - - description: Creates and restores a snapshot with upgrade strategy - title: K3S_Restore_Upgrade_Strategy - priority: 5 - type: 8 - is_flaky: 0 - automation: 2 - steps: - - action: Create a snapshot - expectedresult: "" - data: "" - position: 1 - attachments: [] - - action: Restore the snapshot - expectedresult: "" - data: "" - position: 2 - attachments: [] - - action: Verify cluster state - expectedresult: "" - data: "" - position: 3 - attachments: [] - custom_field: - "14": Validation - "18": Hostbusters - - description: Replaces all control plane nodes on a cluster and restores a snapshot title: RKE2_Replace_Control_Plane_Nodes priority: 5 @@ -406,99 +266,6 @@ "14": Validation "18": Hostbusters - - description: Replaces all control plane nodes on a cluster and restores a snapshot - title: K3S_Replace_Control_Plane_Nodes - priority: 5 - type: 8 - is_flaky: 0 - automation: 2 - steps: - - action: Create a snapshot - expectedresult: "" - data: "" - position: 1 - attachments: [] - - action: Replace control plane nodes - expectedresult: "" - data: "" - position: 2 - attachments: [] - - action: Restore the snapshot - expectedresult: "" - data: "" - position: 3 - attachments: [] - - action: Verify cluster state - expectedresult: "" - data: "" - position: 4 - attachments: [] - custom_field: - "14": Validation - "18": Hostbusters - - - description: Replaces all etcd nodes on a cluster and restores a snapshot - title: K3S_Replace_ETCD_Nodes - priority: 5 - type: 8 - is_flaky: 0 - automation: 2 - steps: - - action: Create a snapshot - expectedresult: "" - data: "" - position: 1 - attachments: [] - - action: Replace etcd nodes - expectedresult: "" - data: "" - position: 2 - attachments: [] - - action: Restore the snapshot - expectedresult: "" - data: "" - position: 3 - attachments: [] - - action: Verify cluster state - expectedresult: "" - data: "" - position: 4 - attachments: [] - custom_field: - "14": Validation - "18": Hostbusters - - - description: Replaces all worker nodes on a cluster and restores a snapshot - title: K3S_Replace_Worker_Nodes - priority: 5 - type: 8 - is_flaky: 0 - automation: 2 - steps: - - action: Create a snapshot - expectedresult: "" - data: "" - position: 1 - attachments: [] - - action: Replace worker nodes - expectedresult: "" - data: "" - position: 2 - attachments: [] - - action: Restore the snapshot - 
expectedresult: "" - data: "" - position: 3 - attachments: [] - - action: Verify cluster state - expectedresult: "" - data: "" - position: 4 - attachments: [] - custom_field: - "14": Validation - "18": Hostbusters - - description: Creates and restores a series of snapshots title: RKE2_Recurring_Restores priority: 6 @@ -506,37 +273,6 @@ is_flaky: 0 automation: 2 steps: - - action: Create a snapshot - expectedresult: "" - data: "" - position: 1 - attachments: [] - - action: Restore the snapshot - expectedresult: "" - data: "" - position: 2 - attachments: [] - - action: Verify cluster state - expectedresult: "" - data: "" - position: 3 - attachments: [] - - action: Repeat steps 1-3 - expectedresult: "" - data: "" - position: 4 - attachments: [] - custom_field: - "14": Validation - "18": Hostbusters - - - description: Creates and restores a series of snapshots - title: K3S_Recurring_Restores - priority: 6 - type: 8 - is_flaky: 0 - automation: 2 - steps: - action: Create a snapshot expectedresult: "" data: "" diff --git a/validation/snapshot/rke2/snapshot_recurring_test.go b/validation/snapshot/rke2/snapshot_recurring_test.go new file mode 100644 index 000000000..e228fff9e --- /dev/null +++ b/validation/snapshot/rke2/snapshot_recurring_test.go @@ -0,0 +1,119 @@ +//go:build validation || recurring + +package rke2 + +import ( + "os" + "testing" + + "github.com/rancher/shepherd/clients/rancher" + v1 "github.com/rancher/shepherd/clients/rancher/v1" + extClusters "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/pkg/config" + "github.com/rancher/shepherd/pkg/config/operations" + "github.com/rancher/shepherd/pkg/session" + "github.com/rancher/tests/actions/clusters" + "github.com/rancher/tests/actions/config/defaults" + "github.com/rancher/tests/actions/etcdsnapshot" + "github.com/rancher/tests/actions/logging" + "github.com/rancher/tests/actions/provisioning" + "github.com/rancher/tests/actions/qase" + resources "github.com/rancher/tests/validation/provisioning/resources/provisioncluster" + standard "github.com/rancher/tests/validation/provisioning/resources/standarduser" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type SnapshotRecurringTestSuite struct { + suite.Suite + session *session.Session + client *rancher.Client + cattleConfig map[string]any + cluster *v1.SteveAPIObject +} + +func (s *SnapshotRecurringTestSuite) TearDownSuite() { + s.session.Cleanup() +} + +func (s *SnapshotRecurringTestSuite) SetupSuite() { + testSession := session.NewSession() + s.session = testSession + + client, err := rancher.NewClient("", s.session) + require.NoError(s.T(), err) + + s.client = client + + standardUserClient, _, _, err := standard.CreateStandardUser(s.client) + require.NoError(s.T(), err) + + s.cattleConfig = config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey)) + + s.cattleConfig, err = defaults.LoadPackageDefaults(s.cattleConfig, "") + require.NoError(s.T(), err) + + loggingConfig := new(logging.Logging) + operations.LoadObjectFromMap(logging.LoggingKey, s.cattleConfig, loggingConfig) + + err = logging.SetLogger(loggingConfig) + require.NoError(s.T(), err) + + clusterConfig := new(clusters.ClusterConfig) + operations.LoadObjectFromMap(defaults.ClusterConfigKey, s.cattleConfig, clusterConfig) + + rancherConfig := new(rancher.Config) + operations.LoadObjectFromMap(defaults.RancherConfigKey, s.cattleConfig, rancherConfig) + + 
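+    // If rancher.clusterName is unset in the cattle config, SetupSuite provisions
+    // a fresh RKE2 cluster below; otherwise it reuses the named cluster from the
+    // fleet-default namespace.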
+    provider := provisioning.CreateProvider(clusterConfig.Provider)
+    machineConfigSpec := provider.LoadMachineConfigFunc(s.cattleConfig)
+
+    if rancherConfig.ClusterName == "" {
+        logrus.Info("Provisioning RKE2 cluster")
+        s.cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false)
+        require.NoError(s.T(), err)
+    } else {
+        logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName)
+        s.cluster, err = client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + s.client.RancherConfig.ClusterName)
+        require.NoError(s.T(), err)
+    }
+}
+
+func (s *SnapshotRecurringTestSuite) TestSnapshotRecurringRestores() {
+    snapshotRestoreFiveTimes := &etcdsnapshot.Config{
+        UpgradeKubernetesVersion: "",
+        SnapshotRestore:          "none",
+        RecurringRestores:        5,
+    }
+
+    tests := []struct {
+        name         string
+        etcdSnapshot *etcdsnapshot.Config
+        cluster      *v1.SteveAPIObject
+    }{
+        {"RKE2_Recurring_Restores", snapshotRestoreFiveTimes, s.cluster},
+    }
+
+    for _, tt := range tests {
+        var err error
+        s.Run(tt.name, func() {
+            cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.cluster.ID)
+            require.NoError(s.T(), err)
+
+            err = etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, containerImage)
+            require.NoError(s.T(), err)
+        })
+
+        params := provisioning.GetProvisioningSchemaParams(s.client, s.cattleConfig)
+        err = qase.UpdateSchemaParameters(tt.name, params)
+        if err != nil {
+            logrus.Warningf("Failed to upload schema parameters %s", err)
+        }
+    }
+}
+
+func TestSnapshotRecurringTestSuite(t *testing.T) {
+    suite.Run(t, new(SnapshotRecurringTestSuite))
+}
diff --git a/validation/snapshot/rke2/snapshot_restore_test.go b/validation/snapshot/rke2/snapshot_restore_test.go
new file mode 100644
index 000000000..89c6da6e0
--- /dev/null
+++ b/validation/snapshot/rke2/snapshot_restore_test.go
@@ -0,0 +1,144 @@
+//go:build (validation || recurring || extended || infra.any || cluster.any) && !sanity && !stress
+
+package rke2
+
+import (
+    "os"
+    "testing"
+
+    "github.com/rancher/shepherd/clients/rancher"
+    v1 "github.com/rancher/shepherd/clients/rancher/v1"
+    extClusters "github.com/rancher/shepherd/extensions/clusters"
+    "github.com/rancher/shepherd/extensions/defaults/stevetypes"
+    "github.com/rancher/shepherd/pkg/config"
+    "github.com/rancher/shepherd/pkg/config/operations"
+    "github.com/rancher/shepherd/pkg/session"
+    "github.com/rancher/tests/actions/clusters"
+    "github.com/rancher/tests/actions/config/defaults"
+    "github.com/rancher/tests/actions/etcdsnapshot"
+    "github.com/rancher/tests/actions/logging"
+    "github.com/rancher/tests/actions/provisioning"
+    "github.com/rancher/tests/actions/qase"
+    resources "github.com/rancher/tests/validation/provisioning/resources/provisioncluster"
+    standard "github.com/rancher/tests/validation/provisioning/resources/standarduser"
+    "github.com/sirupsen/logrus"
+    "github.com/stretchr/testify/require"
+    "github.com/stretchr/testify/suite"
+)
+
+const (
+    containerImage        = "nginx"
+    windowsContainerImage = "mcr.microsoft.com/windows/servercore/iis"
+)
+
+type SnapshotRestoreTestSuite struct {
+    suite.Suite
+    session      *session.Session
+    client       *rancher.Client
+    cattleConfig map[string]any
+    cluster      *v1.SteveAPIObject
+}
+
+func (s *SnapshotRestoreTestSuite) TearDownSuite() {
+    s.session.Cleanup()
+}
+
+func (s *SnapshotRestoreTestSuite) SetupSuite() {
+    testSession := session.NewSession()
+    s.session = testSession
+
+    client, err := rancher.NewClient("", s.session)
+    require.NoError(s.T(), err)
+
+    s.client = client
+
+    standardUserClient, _, _, err := standard.CreateStandardUser(s.client)
+    require.NoError(s.T(), err)
+
+    s.cattleConfig = config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey))
+
+    s.cattleConfig, err = defaults.LoadPackageDefaults(s.cattleConfig, "")
+    require.NoError(s.T(), err)
+
+    loggingConfig := new(logging.Logging)
+    operations.LoadObjectFromMap(logging.LoggingKey, s.cattleConfig, loggingConfig)
+
+    err = logging.SetLogger(loggingConfig)
+    require.NoError(s.T(), err)
+
+    clusterConfig := new(clusters.ClusterConfig)
+    operations.LoadObjectFromMap(defaults.ClusterConfigKey, s.cattleConfig, clusterConfig)
+
+    rancherConfig := new(rancher.Config)
+    operations.LoadObjectFromMap(defaults.RancherConfigKey, s.cattleConfig, rancherConfig)
+
+    provider := provisioning.CreateProvider(clusterConfig.Provider)
+    machineConfigSpec := provider.LoadMachineConfigFunc(s.cattleConfig)
+
+    if rancherConfig.ClusterName == "" {
+        logrus.Info("Provisioning RKE2 cluster")
+        s.cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, false, false)
+        require.NoError(s.T(), err)
+    } else {
+        logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName)
+        s.cluster, err = client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + s.client.RancherConfig.ClusterName)
+        require.NoError(s.T(), err)
+    }
+}
+
+func snapshotRestoreConfigs() []*etcdsnapshot.Config {
+    return []*etcdsnapshot.Config{
+        {
+            UpgradeKubernetesVersion: "",
+            SnapshotRestore:          "none",
+            RecurringRestores:        1,
+        },
+        {
+            UpgradeKubernetesVersion: "",
+            SnapshotRestore:          "kubernetesVersion",
+            RecurringRestores:        1,
+        },
+        {
+            UpgradeKubernetesVersion:     "",
+            SnapshotRestore:              "all",
+            ControlPlaneConcurrencyValue: "15%",
+            WorkerConcurrencyValue:       "20%",
+            RecurringRestores:            1,
+        },
+    }
+}
+
+func (s *SnapshotRestoreTestSuite) TestSnapshotRestore() {
+    snapshotRestoreConfig := snapshotRestoreConfigs()
+    tests := []struct {
+        name         string
+        etcdSnapshot *etcdsnapshot.Config
+        cluster      *v1.SteveAPIObject
+    }{
+        {"RKE2_Restore_ETCD", snapshotRestoreConfig[0], s.cluster},
+        {"RKE2_Restore_ETCD_K8sVersion", snapshotRestoreConfig[1], s.cluster},
+        {"RKE2_Restore_Upgrade_Strategy", snapshotRestoreConfig[2], s.cluster},
+    }
+
+    for _, tt := range tests {
+        var err error
+        s.Run(tt.name, func() {
+            cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.cluster.ID)
+            require.NoError(s.T(), err)
+
+            err = etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, containerImage)
+            require.NoError(s.T(), err)
+        })
+
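+        // Schema parameter upload is best-effort: a failure is logged as a
+        // warning and does not fail the test run.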
+        params := provisioning.GetProvisioningSchemaParams(s.client, s.cattleConfig)
+        err = qase.UpdateSchemaParameters(tt.name, params)
+        if err != nil {
+            logrus.Warningf("Failed to upload schema parameters %s", err)
+        }
+    }
+}
+
+func TestSnapshotRestoreTestSuite(t *testing.T) {
+    suite.Run(t, new(SnapshotRestoreTestSuite))
+}
diff --git a/validation/snapshot/rke2k3s/snapshot_restore_wins_test.go b/validation/snapshot/rke2/snapshot_restore_wins_test.go
similarity index 77%
rename from validation/snapshot/rke2k3s/snapshot_restore_wins_test.go
rename to validation/snapshot/rke2/snapshot_restore_wins_test.go
index 2faf43189..4dbf36a18 100644
--- a/validation/snapshot/rke2k3s/snapshot_restore_wins_test.go
+++ b/validation/snapshot/rke2/snapshot_restore_wins_test.go
@@ -1,6 +1,6 @@
 //go:build validation || recurring
 
-package rke2k3s
+package rke2
 
 import (
     "os"
@@ -33,7 +33,7 @@ type SnapshotRestoreWindowsTestSuite struct {
     session      *session.Session
     client       *rancher.Client
     cattleConfig map[string]any
-    rke2Cluster  *v1.SteveAPIObject
+    cluster      *v1.SteveAPIObject
 }
 
 func (s *SnapshotRestoreWindowsTestSuite) TearDownSuite() {
@@ -69,6 +69,9 @@ func (s *SnapshotRestoreWindowsTestSuite) SetupSuite() {
     awsEC2Configs := new(ec2.AWSEC2Configs)
     operations.LoadObjectFromMap(ec2.ConfigurationFileKey, s.cattleConfig, awsEC2Configs)
 
+    rancherConfig := new(rancher.Config)
+    operations.LoadObjectFromMap(defaults.RancherConfigKey, s.cattleConfig, rancherConfig)
+
     nodeRolesStandard := []provisioninginput.MachinePools{
         provisioninginput.EtcdMachinePool,
         provisioninginput.ControlPlaneMachinePool,
@@ -85,10 +88,16 @@ func (s *SnapshotRestoreWindowsTestSuite) SetupSuite() {
     provider := provisioning.CreateProvider(clusterConfig.Provider)
     machineConfigSpec := provider.LoadMachineConfigFunc(s.cattleConfig)
 
+    if rancherConfig.ClusterName == "" {
-    logrus.Info("Provisioning RKE2 windows cluster")
-    s.rke2Cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, awsEC2Configs, true, true)
-    require.NoError(s.T(), err)
+        logrus.Info("Provisioning RKE2 windows cluster")
+        s.cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, awsEC2Configs, true, true)
+        require.NoError(s.T(), err)
+    } else {
+        logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName)
+        s.cluster, err = client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + s.client.RancherConfig.ClusterName)
+        require.NoError(s.T(), err)
+    }
 }
 
 func (s *SnapshotRestoreWindowsTestSuite) TestSnapshotRestoreWindows() {
@@ -101,17 +110,18 @@ func (s *SnapshotRestoreWindowsTestSuite) TestSnapshotRestoreWindows() {
     tests := []struct {
         name         string
         etcdSnapshot *etcdsnapshot.Config
-        clusterID    string
+        cluster      *v1.SteveAPIObject
     }{
-        {"RKE2_Windows_Restore", snapshotRestoreNone, s.rke2Cluster.ID},
+        {"RKE2_Windows_Restore", snapshotRestoreNone, s.cluster},
     }
 
     for _, tt := range tests {
-        cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.clusterID)
-        require.NoError(s.T(), err)
-
+        var err error
         s.Run(tt.name, func() {
-            err := etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, windowsContainerImage)
+            cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.cluster.ID)
+            require.NoError(s.T(), err)
+
+            err = etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, windowsContainerImage)
             require.NoError(s.T(), err)
         })
diff --git a/validation/snapshot/rke2k3s/snapshot_retention_test.go b/validation/snapshot/rke2/snapshot_retention_test.go
similarity index 66%
rename from validation/snapshot/rke2k3s/snapshot_retention_test.go
rename to validation/snapshot/rke2/snapshot_retention_test.go
index 07fd71d43..d190ddb07 100644
--- a/validation/snapshot/rke2k3s/snapshot_retention_test.go
+++ b/validation/snapshot/rke2/snapshot_retention_test.go
@@ -1,6 +1,6 @@
 //go:build (validation || extended || infra.any || cluster.any) && !sanity && !stress
 
-package rke2k3s
+package rke2
 
 import (
     "fmt"
@@ -32,8 +32,7 @@ type SnapshotRetentionTestSuite struct {
     session        *session.Session
     client         *rancher.Client
     cattleConfig   map[string]any
-    rke2Cluster    *v1.SteveAPIObject
-    k3sCluster     *v1.SteveAPIObject
+    cluster        *v1.SteveAPIObject
     snapshotConfig *SnapshotRetentionConfig
 }
 
@@ -73,45 +72,49 @@ func (s *SnapshotRetentionTestSuite) SetupSuite() {
     clusterConfig := new(clusters.ClusterConfig)
     operations.LoadObjectFromMap(defaults.ClusterConfigKey, s.cattleConfig, clusterConfig)
 
+    rancherConfig := new(rancher.Config)
+    operations.LoadObjectFromMap(defaults.RancherConfigKey, s.cattleConfig, rancherConfig)
+
     provider := provisioning.CreateProvider(clusterConfig.Provider)
     machineConfigSpec := provider.LoadMachineConfigFunc(s.cattleConfig)
 
-    logrus.Info("Provisioning RKE2 cluster")
-    s.rke2Cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false)
-    require.NoError(s.T(), err)
-
-    logrus.Info("Provisioning K3S cluster")
-    s.k3sCluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.K3SClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, true, false)
-    require.NoError(s.T(), err)
+    if rancherConfig.ClusterName == "" {
+        logrus.Info("Provisioning RKE2 cluster")
+        s.cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, false, false)
+        require.NoError(s.T(), err)
+    } else {
+        logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName)
+        s.cluster, err = client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + s.client.RancherConfig.ClusterName)
+        require.NoError(s.T(), err)
+    }
 }
 
 func (s *SnapshotRetentionTestSuite) TestAutomaticSnapshotRetention() {
     tests := []struct {
         testName                 string
-        clusterID                string
+        cluster                  *v1.SteveAPIObject
         retentionLimit           int
        intervalBetweenSnapshots int
     }{
-        {"RKE2_Retention_Limit", s.rke2Cluster.ID, 2, 1},
-        {"K3S_Retention_Limit", s.k3sCluster.ID, 2, 1},
+        {"RKE2_Retention_Limit", s.cluster, 2, 1},
     }
 
     for _, tt := range tests {
-        cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.clusterID)
-        require.NoError(s.T(), err)
+        s.Run(tt.testName, func() {
+            cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.cluster.ID)
+            require.NoError(s.T(), err)
 
-        clusterObject, clusterResponse, err := extClusters.GetProvisioningClusterByName(s.client, cluster.Name, namespaces.FleetDefault)
-        require.NoError(s.T(), err)
+            clusterObject, clusterResponse, err := extClusters.GetProvisioningClusterByName(s.client, cluster.Name, namespaces.FleetDefault)
+            require.NoError(s.T(), err)
 
-        clusterObject.Spec.RKEConfig.ETCD.SnapshotRetention = tt.retentionLimit
-        cronSchedule := fmt.Sprintf("%s%v%s", "*/", tt.intervalBetweenSnapshots, " * * * *")
-        clusterObject.Spec.RKEConfig.ETCD.SnapshotScheduleCron = cronSchedule
+            clusterObject.Spec.RKEConfig.ETCD.SnapshotRetention = tt.retentionLimit
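+            // An interval of 1 produces the cron expression "*/1 * * * *",
+            // i.e. a snapshot every minute.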
+            cronSchedule := fmt.Sprintf("%s%v%s", "*/", tt.intervalBetweenSnapshots, " * * * *")
+            clusterObject.Spec.RKEConfig.ETCD.SnapshotScheduleCron = cronSchedule
 
-        _, err = s.client.Steve.SteveType(stevetypes.Provisioning).Update(clusterResponse, clusterObject)
-        require.NoError(s.T(), err)
+            _, err = s.client.Steve.SteveType(stevetypes.Provisioning).Update(clusterResponse, clusterObject)
+            require.NoError(s.T(), err)
 
-        s.Run(tt.testName, func() {
-            err := etcdsnapshot.CreateSnapshotsUntilRetentionLimit(s.client, cluster.Name, tt.retentionLimit, tt.intervalBetweenSnapshots)
+            err = etcdsnapshot.CreateSnapshotsUntilRetentionLimit(s.client, cluster.Name, tt.retentionLimit, tt.intervalBetweenSnapshots)
             require.NoError(s.T(), err)
         })
     }
diff --git a/validation/snapshot/rke2/snapshot_s3_restore_test.go b/validation/snapshot/rke2/snapshot_s3_restore_test.go
new file mode 100644
index 000000000..92791f694
--- /dev/null
+++ b/validation/snapshot/rke2/snapshot_s3_restore_test.go
@@ -0,0 +1,120 @@
+//go:build (validation || extended || infra.any || cluster.any) && !sanity && !stress
+
+package rke2
+
+import (
+    "os"
+    "testing"
+
+    "github.com/rancher/shepherd/clients/rancher"
+    v1 "github.com/rancher/shepherd/clients/rancher/v1"
+    extClusters "github.com/rancher/shepherd/extensions/clusters"
+    "github.com/rancher/shepherd/extensions/defaults/stevetypes"
+    "github.com/rancher/shepherd/pkg/config"
+    "github.com/rancher/shepherd/pkg/config/operations"
+    "github.com/rancher/shepherd/pkg/session"
+    "github.com/rancher/tests/actions/clusters"
+    "github.com/rancher/tests/actions/config/defaults"
+    "github.com/rancher/tests/actions/etcdsnapshot"
+    "github.com/rancher/tests/actions/logging"
+    "github.com/rancher/tests/actions/provisioning"
+    "github.com/rancher/tests/actions/qase"
+    resources "github.com/rancher/tests/validation/provisioning/resources/provisioncluster"
+    standard "github.com/rancher/tests/validation/provisioning/resources/standarduser"
+    "github.com/sirupsen/logrus"
+
+    "github.com/stretchr/testify/require"
+    "github.com/stretchr/testify/suite"
+)
+
+type S3SnapshotRestoreTestSuite struct {
+    suite.Suite
+    session      *session.Session
+    client       *rancher.Client
+    cattleConfig map[string]any
+    cluster      *v1.SteveAPIObject
+}
+
+func (s *S3SnapshotRestoreTestSuite) TearDownSuite() {
+    s.session.Cleanup()
+}
+
+func (s *S3SnapshotRestoreTestSuite) SetupSuite() {
+    testSession := session.NewSession()
+    s.session = testSession
+
+    client, err := rancher.NewClient("", s.session)
+    require.NoError(s.T(), err)
+
+    s.client = client
+
+    standardUserClient, _, _, err := standard.CreateStandardUser(s.client)
+    require.NoError(s.T(), err)
+
+    s.cattleConfig = config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey))
+
+    s.cattleConfig, err = defaults.LoadPackageDefaults(s.cattleConfig, "")
+    require.NoError(s.T(), err)
+
+    loggingConfig := new(logging.Logging)
+    operations.LoadObjectFromMap(logging.LoggingKey, s.cattleConfig, loggingConfig)
+
+    err = logging.SetLogger(loggingConfig)
+    require.NoError(s.T(), err)
+
+    clusterConfig := new(clusters.ClusterConfig)
+    operations.LoadObjectFromMap(defaults.ClusterConfigKey, s.cattleConfig, clusterConfig)
+
+    rancherConfig := new(rancher.Config)
+    operations.LoadObjectFromMap(defaults.RancherConfigKey, s.cattleConfig, rancherConfig)
+
+    provider := provisioning.CreateProvider(clusterConfig.Provider)
+    machineConfigSpec := provider.LoadMachineConfigFunc(s.cattleConfig)
+
+    if rancherConfig.ClusterName == "" {
+        logrus.Info("Provisioning RKE2 cluster")
+        s.cluster, err = resources.ProvisionRKE2K3SCluster(s.T(), standardUserClient, extClusters.RKE2ClusterType.String(), provider, *clusterConfig, machineConfigSpec, nil, false, false)
+        require.NoError(s.T(), err)
+    } else {
+        logrus.Infof("Using existing cluster %s", rancherConfig.ClusterName)
+        s.cluster, err = client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + s.client.RancherConfig.ClusterName)
+        require.NoError(s.T(), err)
+    }
+}
+
+func (s *S3SnapshotRestoreTestSuite) TestS3SnapshotRestore() {
+    snapshotRestoreNone := &etcdsnapshot.Config{
+        UpgradeKubernetesVersion: "",
+        SnapshotRestore:          "none",
+        RecurringRestores:        1,
+    }
+
+    tests := []struct {
+        name         string
+        etcdSnapshot *etcdsnapshot.Config
+        cluster      *v1.SteveAPIObject
+    }{
+        {"RKE2_S3_Restore", snapshotRestoreNone, s.cluster},
+    }
+
+    for _, tt := range tests {
+        var err error
+        s.Run(tt.name, func() {
+            cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.cluster.ID)
+            require.NoError(s.T(), err)
+
+            err = etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, containerImage)
+            require.NoError(s.T(), err)
+        })
+
+        params := provisioning.GetProvisioningSchemaParams(s.client, s.cattleConfig)
+        err = qase.UpdateSchemaParameters(tt.name, params)
+        if err != nil {
+            logrus.Warningf("Failed to upload schema parameters %s", err)
+        }
+    }
+}
+
+func TestS3SnapshotRestoreTestSuite(t *testing.T) {
+    suite.Run(t, new(S3SnapshotRestoreTestSuite))
+}
diff --git a/validation/snapshot/rke2k3s/snapshot_restore_existing_cluster_test.go b/validation/snapshot/rke2k3s/snapshot_restore_existing_cluster_test.go
deleted file mode 100644
index ada5b8e7d..000000000
--- a/validation/snapshot/rke2k3s/snapshot_restore_existing_cluster_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-//go:build (validation || extended || infra.any || cluster.any) && !sanity && !stress
-
-package rke2k3s
-
-import (
-    "os"
-    "testing"
-
-    "github.com/rancher/shepherd/clients/rancher"
-    v1 "github.com/rancher/shepherd/clients/rancher/v1"
-    "github.com/rancher/shepherd/extensions/defaults/stevetypes"
-    "github.com/rancher/shepherd/pkg/config"
-    "github.com/rancher/shepherd/pkg/config/operations"
-    "github.com/rancher/shepherd/pkg/session"
-    "github.com/rancher/tests/actions/config/defaults"
-    "github.com/rancher/tests/actions/etcdsnapshot"
-    "github.com/rancher/tests/actions/logging"
-    "github.com/stretchr/testify/require"
-    "github.com/stretchr/testify/suite"
-)
-
-type SnapshotRestoreExistingClusterTestSuite struct {
-    suite.Suite
-    session       *session.Session
-    client        *rancher.Client
-    cattleConfig  map[string]any
-    clusterObject *v1.SteveAPIObject
-}
-
-func (s *SnapshotRestoreExistingClusterTestSuite) TearDownSuite() {
-    s.session.Cleanup()
-}
-
-func (s *SnapshotRestoreExistingClusterTestSuite) SetupSuite() {
-    testSession := session.NewSession()
-    s.session = testSession
-
-    client, err := rancher.NewClient("", s.session)
-    require.NoError(s.T(), err)
-
-    s.client = client
-
-    s.cattleConfig = config.LoadConfigFromFile(os.Getenv(config.ConfigEnvironmentKey))
-
-    s.cattleConfig, err = defaults.LoadPackageDefaults(s.cattleConfig, "")
-    require.NoError(s.T(), err)
-
-    loggingConfig := new(logging.Logging)
-    operations.LoadObjectFromMap(logging.LoggingKey, s.cattleConfig, loggingConfig)
-
-    err = logging.SetLogger(loggingConfig)
-    require.NoError(s.T(), err)
-
-    s.clusterObject, err = client.Steve.SteveType(stevetypes.Provisioning).ByID("fleet-default/" + s.client.RancherConfig.ClusterName)
-    require.NoError(s.T(), err)
-}
-
-func snapshotRestoreExistingClusterConfigs() []*etcdsnapshot.Config {
-    return []*etcdsnapshot.Config{
-        {
-            UpgradeKubernetesVersion: "",
-            SnapshotRestore:          "none",
-            RecurringRestores:        1,
-        },
-        {
-            UpgradeKubernetesVersion: "",
-            SnapshotRestore:          "kubernetesVersion",
-            RecurringRestores:        1,
-        },
-        {
-            UpgradeKubernetesVersion:     "",
-            SnapshotRestore:              "all",
-            ControlPlaneConcurrencyValue: "15%",
-            WorkerConcurrencyValue:       "20%",
-            RecurringRestores:            1,
-        },
-    }
-}
-
-func (s *SnapshotRestoreExistingClusterTestSuite) TestSnapshotRestoreExistingCluster() {
-    snapshotRestoreConfig := snapshotRestoreExistingClusterConfigs()
-    tests := []struct {
-        name         string
-        etcdSnapshot *etcdsnapshot.Config
-        clusterID    string
-    }{
-        {"RKE2_Restore_ETCD", snapshotRestoreConfig[0], s.clusterObject.ID},
-        {"RKE2_Restore_ETCD_K8sVersion", snapshotRestoreConfig[1], s.clusterObject.ID},
-        {"RKE2K3S_Restore_Upgrade_Strategy", snapshotRestoreConfig[2], s.clusterObject.ID},
-    }
-
-    for _, tt := range tests {
-        cluster, err := s.client.Steve.SteveType(stevetypes.Provisioning).ByID(tt.clusterID)
-        require.NoError(s.T(), err)
-
-        s.Run(tt.name, func() {
-            err := etcdsnapshot.CreateAndValidateSnapshotRestore(s.client, cluster.Name, tt.etcdSnapshot, "nginx")
-            require.NoError(s.T(), err)
-        })
-    }
-}
-
-func TestSnapshotRestoreExistingClusterTestSuite(t *testing.T) {
-    suite.Run(t, new(SnapshotRestoreExistingClusterTestSuite))
-}