Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 8 additions & 1 deletion cmd/openshift-tests/openshift-tests.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,8 +63,15 @@ func main() {
os.Unsetenv("ENABLE_STORAGE_GCE_PD_DRIVER")
}

// Detect intentionally degraded clusters (e.g. TNF degraded) based on
// CI-provided environment signals and propagate that context to extended tests
if os.Getenv("DEGRADED_NODE") == "true" {
exutil.ClusterDegraded = true
logrus.Infof("openshift-tests targeting intentionally degraded cluster")
}

pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc)
//pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
// pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)

extensionRegistry, originExtension, err := extensions.InitializeOpenShiftTestsExtensionFramework()
if err != nil {
Expand Down
44 changes: 34 additions & 10 deletions test/extended/operators/certs.go
Original file line number Diff line number Diff line change
Expand Up @@ -128,8 +128,17 @@ var _ = g.Describe(fmt.Sprintf("[sig-arch][Late][Jira:%q]", "kube-apiserver"), g
// Skip metal jobs if test image pullspec cannot be determined
if jobType.Platform != "metal" || err == nil {
o.Expect(err).NotTo(o.HaveOccurred())
onDiskPKIContent, err = fetchOnDiskCertificates(ctx, kubeClient, oc.AdminConfig(), masters, openshiftTestImagePullSpec)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So the presence of unready masters on line 124 does not cause any issue?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I wondered that as well! It turns out that one master being NotReady shouldn't affect gatherCertsFromPlatformNamespaces on line 124. It collects all its data from the cluster API and uses the masters slice only for name-rewriting and cleanup. Unlike the on-disk collection path, it doesn't pin any helper pods to nodes or reach out to kubelets.

o.Expect(err).NotTo(o.HaveOccurred())

// Only relax on-disk cert collection when the cluster is intentionally degraded
// and the topology is Two-Node Fencing (DualReplica).
if exutil.ClusterDegraded && exutil.IsTwoNodeFencing(ctx, configClient) {
readyMasters, _ := filterReadyNodes(masters)
onDiskPKIContent, err = fetchOnDiskCertificates(ctx, kubeClient, oc.AdminConfig(), readyMasters, openshiftTestImagePullSpec)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
onDiskPKIContent, err = fetchOnDiskCertificates(ctx, kubeClient, oc.AdminConfig(), masters, openshiftTestImagePullSpec)
o.Expect(err).NotTo(o.HaveOccurred())
}
}

actualPKIContent = certgraphanalysis.MergePKILists(ctx, inClusterPKIContent, onDiskPKIContent)
Expand Down Expand Up @@ -160,14 +169,13 @@ var _ = g.Describe(fmt.Sprintf("[sig-arch][Late][Jira:%q]", "kube-apiserver"), g
o.Expect(err).NotTo(o.HaveOccurred())

pkiDir := filepath.Join(exutil.ArtifactDirPath(), "rawTLSInfo")
err = os.MkdirAll(pkiDir, 0755)
err = os.MkdirAll(pkiDir, 0o755)
o.Expect(err).NotTo(o.HaveOccurred())
err = os.WriteFile(filepath.Join(pkiDir, tlsArtifactFilename), jsonBytes, 0644)
err = os.WriteFile(filepath.Join(pkiDir, tlsArtifactFilename), jsonBytes, 0o644)
o.Expect(err).NotTo(o.HaveOccurred())
})

g.It("all tls artifacts must be registered", func() {

violationsPKIContent, err := certs.GetPKIInfoFromEmbeddedOwnership(ownership.PKIViolations)
o.Expect(err).NotTo(o.HaveOccurred())

Expand All @@ -181,7 +189,6 @@ var _ = g.Describe(fmt.Sprintf("[sig-arch][Late][Jira:%q]", "kube-apiserver"), g

_, err := certgraphutils.LocateCertKeyPairBySecretLocation(currLocation, expectedPKIContent.CertKeyPairs)
if err != nil {

newTLSRegistry.CertKeyPairs = append(newTLSRegistry.CertKeyPairs, certgraphapi.PKIRegistryCertKeyPair{InClusterLocation: &actualPKIContent.InClusterResourceData.CertKeyPairs[i]})
}

Expand Down Expand Up @@ -269,11 +276,11 @@ var _ = g.Describe(fmt.Sprintf("[sig-arch][Late][Jira:%q]", "kube-apiserver"), g
if len(newTLSRegistry.CertKeyPairs) > 0 || len(newTLSRegistry.CertificateAuthorityBundles) > 0 {
registryString, err := json.MarshalIndent(newTLSRegistry, "", " ")
if err != nil {
//g.Fail("Failed to marshal registry %#v: %v", newTLSRegistry, err)
// g.Fail("Failed to marshal registry %#v: %v", newTLSRegistry, err)
testresult.Flakef("Failed to marshal registry %#v: %v", newTLSRegistry, err)
}
// TODO: uncomment when test no longer fails and enhancement is merged
//g.Fail(fmt.Sprintf("Unregistered TLS certificates:\n%s", registryString))
// g.Fail(fmt.Sprintf("Unregistered TLS certificates:\n%s", registryString))
testresult.Flakef("Unregistered TLS certificates found:\n%s\nSee tls/ownership/README.md in origin repo", registryString)
}
})
Expand All @@ -285,7 +292,7 @@ var _ = g.Describe(fmt.Sprintf("[sig-arch][Late][Jira:%q]", "kube-apiserver"), g

if len(messages) > 0 {
// TODO: uncomment when test no longer fails and enhancement is merged
//g.Fail(strings.Join(messages, "\n"))
// g.Fail(strings.Join(messages, "\n"))
testresult.Flakef("%s", strings.Join(messages, "\n"))
}
})
Expand Down Expand Up @@ -323,7 +330,6 @@ var _ = g.Describe(fmt.Sprintf("[sig-arch][Late][Jira:%q]", "kube-apiserver"), g
testresult.Flakef("Errors found: %s", utilerrors.NewAggregate(errs).Error())
}
})

})

func fetchOnDiskCertificates(ctx context.Context, kubeClient kubernetes.Interface, podRESTConfig *rest.Config, nodeList []*corev1.Node, testPullSpec string) (*certgraphapi.PKIList, error) {
Expand Down Expand Up @@ -480,3 +486,21 @@ func isCertKeyPairFromIgnoredNamespace(cert certgraphapi.CertKeyPair, ignoredNam
}
return false
}

// filterReadyNodes partitions nodes into those whose NodeReady condition is
// currently True (returned as objects) and the names of the remainder.
func filterReadyNodes(nodes []*corev1.Node) (ready []*corev1.Node, notReady []string) {
	// nodeIsReady reports whether the node carries a NodeReady condition
	// with status True.
	nodeIsReady := func(node *corev1.Node) bool {
		for _, cond := range node.Status.Conditions {
			if cond.Type == corev1.NodeReady && cond.Status == corev1.ConditionTrue {
				return true
			}
		}
		return false
	}

	for _, node := range nodes {
		if nodeIsReady(node) {
			ready = append(ready, node)
			continue
		}
		notReady = append(notReady, node.Name)
	}
	return ready, notReady
}
13 changes: 7 additions & 6 deletions test/extended/util/framework.go
Original file line number Diff line number Diff line change
Expand Up @@ -327,7 +327,6 @@ func WaitForOpenShiftNamespaceImageStreams(oc *CLI) error {

// Check to see if SamplesOperator managementState is Removed
out, err := oc.AsAdmin().Run("get").Args("configs.samples.operator.openshift.io", "cluster", "-o", "yaml").Output()

if err != nil {
e2e.Logf("\n error on getting samples operator CR: %+v\n%#v\n", err, out)
}
Expand Down Expand Up @@ -781,7 +780,7 @@ func VarSubOnFile(srcFile string, destFile string, vars map[string]string) error
k = "${" + k + "}"
srcString = strings.Replace(srcString, k, v, -1) // -1 means unlimited replacements
}
err = ioutil.WriteFile(destFile, []byte(srcString), 0644)
err = ioutil.WriteFile(destFile, []byte(srcString), 0o644)
}
return err
}
Expand Down Expand Up @@ -1654,11 +1653,11 @@ func restoreFixtureAsset(dir, name string) error {
if err != nil {
return err
}
err = os.MkdirAll(assetFilePath(dir, filepath.Dir(name)), os.FileMode(0755))
err = os.MkdirAll(assetFilePath(dir, filepath.Dir(name)), os.FileMode(0o755))
if err != nil {
return err
}
err = ioutil.WriteFile(assetFilePath(dir, name), data, 0640)
err = ioutil.WriteFile(assetFilePath(dir, name), data, 0o640)
if err != nil {
return err
}
Expand Down Expand Up @@ -1990,10 +1989,10 @@ type GitRepo struct {
// AddAndCommit commits a file with its content to local repo
func (r GitRepo) AddAndCommit(file, content string) error {
dir := filepath.Dir(file)
if err := os.MkdirAll(filepath.Join(r.RepoPath, dir), 0777); err != nil {
if err := os.MkdirAll(filepath.Join(r.RepoPath, dir), 0o777); err != nil {
return err
}
if err := ioutil.WriteFile(filepath.Join(r.RepoPath, file), []byte(content), 0666); err != nil {
if err := ioutil.WriteFile(filepath.Join(r.RepoPath, file), []byte(content), 0o666); err != nil {
return err
}
if err := r.repo.Add(r.RepoPath, file); err != nil {
Expand Down Expand Up @@ -2347,6 +2346,8 @@ func IsTwoNodeFencing(ctx context.Context, configClient clientconfigv1.Interface
return infrastructure.Status.ControlPlaneTopology == configv1.DualReplicaTopologyMode
}

var ClusterDegraded bool

func groupName(groupVersionName string) string {
return strings.Split(groupVersionName, "/")[0]
}
Expand Down