
terraform test: refactor manifest file for simplicity #37412


Open · wants to merge 9 commits into base: liamcervante/f-controlling-destroys
175 changes: 99 additions & 76 deletions internal/backend/local/test.go
@@ -12,13 +12,13 @@ import (
"slices"

"github.com/hashicorp/terraform/internal/backend"

"github.com/hashicorp/terraform/internal/backend/backendrun"
"github.com/hashicorp/terraform/internal/command/junit"
"github.com/hashicorp/terraform/internal/command/views"
"github.com/hashicorp/terraform/internal/configs"
"github.com/hashicorp/terraform/internal/moduletest"
"github.com/hashicorp/terraform/internal/moduletest/graph"
+teststates "github.com/hashicorp/terraform/internal/moduletest/states"
"github.com/hashicorp/terraform/internal/terraform"
"github.com/hashicorp/terraform/internal/tfdiags"
)
@@ -44,9 +44,8 @@ type TestSuiteRunner struct {

Opts *terraform.ContextOpts

-View     views.Test
-JUnit    junit.JUnit
-Manifest *graph.TestManifest
+View  views.Test
+JUnit junit.JUnit

// Stopped and Cancelled track whether the user requested the testing
// process to be interrupted. Stopped is a nice graceful exit, we'll still
@@ -72,7 +71,6 @@ type TestSuiteRunner struct {

Concurrency int
DeferralAllowed bool
-Concurrency int

CommandMode moduletest.CommandMode

@@ -108,6 +106,14 @@ func (runner *TestSuiteRunner) Test() (moduletest.Status, tfdiags.Diagnostics) {
return moduletest.Error, diags
}

+manifest, err := teststates.LoadManifest(".")
+if err != nil {
+diags = diags.Append(tfdiags.Sourceless(
+tfdiags.Error,
+"Failed to open state manifest",
+fmt.Sprintf("The test state manifest file could not be opened: %s.", err)))
+}

runner.View.Abstract(suite)

// We have two sets of variables that are available to different test files.
@@ -120,76 +126,29 @@ func (runner *TestSuiteRunner) Test() (moduletest.Status, tfdiags.Diagnostics) {
// collisions, as the test directory variables should take precedence.
maps.Copy(testDirectoryGlobalVariables, runner.GlobalTestVariables)
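As an aside, the precedence comment above relies on maps.Copy(dst, src) overwriting existing keys in dst. A minimal, self-contained sketch (hypothetical variable names, not from this PR) of why copying the test-directory variables last makes them win on key collisions:

package main

import (
	"fmt"
	"maps"
)

func main() {
	// Base globals first, then test-directory globals copied over them:
	// maps.Copy overwrites dst entries on key collisions, so whatever
	// is copied last takes precedence.
	globals := map[string]string{"env": "dev", "region": "us-east-1"}
	testDirGlobals := map[string]string{"env": "test"}

	merged := make(map[string]string, len(globals)+len(testDirGlobals))
	maps.Copy(merged, globals)
	maps.Copy(merged, testDirGlobals)

	fmt.Println(merged["env"])    // "test": the test directory value won
	fmt.Println(merged["region"]) // "us-east-1": base value untouched
}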

-// Generate a manifest that will be used to track the state files created
-// during the test runs.
-manifest, err := func() (*graph.TestManifest, tfdiags.Diagnostics) {
-manifest, err := graph.BuildStateManifest(".", suite.Files)
-if err != nil {
-return manifest, diags.Append(tfdiags.Sourceless(tfdiags.Error, "Failed to build state manifest", err.Error()))
-}
-
-empty, err := manifest.Empty()
-if err != nil {
-return manifest, diags.Append(tfdiags.Sourceless(tfdiags.Error, "Error checking state manifest", err.Error()))
-}
-
-// Unless we're in cleanup mode, we expect the manifest to be empty.
-if !empty && runner.CommandMode != moduletest.CleanupMode {
-return manifest, diags.Append(tfdiags.Sourceless(tfdiags.Error, "State manifest not empty", ``+
-"The state manifest should be empty before running tests. This could be due to a previous test run not cleaning up after itself. "+
-"Please ensure that all state files are cleaned up before running tests."))
-}
-return manifest, nil
-}()
-
-if err != nil {
-suite.Status = moduletest.Error
-runner.View.Conclusion(suite)
-return moduletest.Error, diags
-}
-runner.Manifest = manifest

-suite.Status = moduletest.Pending
for _, name := range slices.Sorted(maps.Keys(suite.Files)) {
if runner.Cancelled {
return suite.Status, diags
}

file := suite.Files[name]

-currentGlobalVariables := runner.GlobalVariables
-if filepath.Dir(file.Name) == runner.TestingDirectory {
-// If the file is in the test directory, we'll use the union of the
-// global variables and the global test variables.
-currentGlobalVariables = testDirectoryGlobalVariables
-}

-evalCtx := graph.NewEvalContext(graph.EvalContextOpts{
-Config: runner.Config,
-CancelCtx: runner.CancelledCtx,
-StopCtx: runner.StoppedCtx,
-Verbose: runner.Verbose,
-Repair: runner.Repair,
-Render: runner.View,
-UnparsedVariables: currentGlobalVariables,
-Concurrency: runner.Concurrency,
-DeferralAllowed: runner.DeferralAllowed,
-Manifest: manifest,
-})
fileRunner := &TestFileRunner{
-Suite:       runner,
-EvalContext: evalCtx,
+Suite:                        runner,
+TestDirectoryGlobalVariables: testDirectoryGlobalVariables,
+Manifest:                     manifest,
}

runner.View.File(file, moduletest.Starting)
fileRunner.Test(file)
runner.View.File(file, moduletest.Complete)
suite.Status = suite.Status.Merge(file.Status)
}

+if err := manifest.Save(); err != nil {
+diags = diags.Append(tfdiags.Sourceless(
+tfdiags.Error,
+"Failed to save state manifest",
+fmt.Sprintf("The test state manifest file could not be saved: %s.", err)))
+}
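Taken together with the LoadManifest call at the top of Test(), the new flow is: open the manifest once per suite, let each file load and save its own states, then persist the manifest after all files have run. Below is a rough, runnable sketch of that lifecycle; the types and method names are hypothetical stand-ins mirroring the LoadManifest, LoadStates, SaveStates, and Save calls visible in this diff, not the real teststates API:

package main

import "fmt"

// testManifest is a hypothetical stand-in for teststates.TestManifest; the
// real type lives in internal/moduletest/states and is not reproduced here.
type testManifest struct {
	states map[string][]string // test file name -> state identifiers
}

// loadManifest mimics opening the manifest once, from the working directory.
func loadManifest(dir string) (*testManifest, error) {
	return &testManifest{states: make(map[string][]string)}, nil
}

func (m *testManifest) loadStates(file string) []string { return m.states[file] }

func (m *testManifest) saveStates(file string, states []string) { m.states[file] = states }

func (m *testManifest) save() error {
	fmt.Println("manifest persisted:", m.states)
	return nil
}

func main() {
	// Mirrors the suite runner: open once, load/save per file, save at the end.
	manifest, err := loadManifest(".")
	if err != nil {
		panic(err)
	}
	for _, file := range []string{"a.tftest.hcl", "b.tftest.hcl"} {
		states := manifest.loadStates(file)
		states = append(states, "main") // stand-in for a real test run's state
		manifest.saveStates(file, states)
	}
	if err := manifest.save(); err != nil {
		panic(err)
	}
}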
runner.View.Conclusion(suite)

if runner.JUnit != nil {
@@ -209,6 +168,7 @@ func (runner *TestSuiteRunner) collectTests() (*moduletest.Suite, tfdiags.Diagno

var diags tfdiags.Diagnostics
suite := &moduletest.Suite{
+Status:      moduletest.Pending,
CommandMode: runner.CommandMode,
Files: func() map[string]*moduletest.File {
files := make(map[string]*moduletest.File)
@@ -274,8 +234,9 @@ func (runner *TestSuiteRunner) collectTests() (*moduletest.Suite, tfdiags.Diagno
type TestFileRunner struct {
// Suite contains all the helpful metadata about the test that we need
// during the execution of a file.
-Suite       *TestSuiteRunner
-EvalContext *graph.EvalContext
+Suite                        *TestSuiteRunner
+TestDirectoryGlobalVariables map[string]backendrun.UnparsedVariableValue
+Manifest                     *teststates.TestManifest
}

func (runner *TestFileRunner) Test(file *moduletest.File) {
@@ -285,6 +246,25 @@ func (runner *TestFileRunner) Test(file *moduletest.File) {
// checking anything about them.
file.Diagnostics = file.Diagnostics.Append(file.Config.Validate(runner.Suite.Config))

+states, stateDiags := runner.Manifest.LoadStates(file, runner.Suite.BackendFactory)
+file.Diagnostics = file.Diagnostics.Append(stateDiags)
+if stateDiags.HasErrors() {
+file.Status = moduletest.Error
+}
+
+if runner.Suite.CommandMode != moduletest.CleanupMode {
+// then we can't have any state files pending cleanup
+for _, state := range states {
+if state.Manifest.Reason != teststates.StateReasonNone {
+file.Diagnostics = file.Diagnostics.Append(tfdiags.Sourceless(
+tfdiags.Error,
+"State manifest not empty",
+fmt.Sprintf("The state manifest for %s should be empty before running tests. This could be due to a previous test run not cleaning up after itself. Please ensure that all state files are cleaned up before running tests.", file.Name)))
+file.Status = moduletest.Error
+}
+}
+}

// We'll execute the tests in the file. First, mark the overall status as
// being skipped. This will ensure that if we've cancelled and the file's not
// going to do anything it'll be marked as skipped.
@@ -293,35 +273,78 @@ func (runner *TestFileRunner) Test(file *moduletest.File) {
// If we have zero run blocks then we'll just mark the file as passed.
file.Status = file.Status.Merge(moduletest.Pass)
return
+} else if runner.Suite.CommandMode == moduletest.CleanupMode {
+// In cleanup mode, we don't actually execute the run blocks so we'll
+// start with the assumption they have all passed.
+file.Status = file.Status.Merge(moduletest.Pass)
+}

+currentGlobalVariables := runner.Suite.GlobalVariables
+if filepath.Dir(file.Name) == runner.Suite.TestingDirectory {
+// If the file is in the test directory, we'll use the union of the
+// global variables and the global test variables.
+currentGlobalVariables = runner.TestDirectoryGlobalVariables
+}

+evalCtx := graph.NewEvalContext(graph.EvalContextOpts{
+Config: runner.Suite.Config,
+CancelCtx: runner.Suite.CancelledCtx,
+StopCtx: runner.Suite.StoppedCtx,
+Verbose: runner.Suite.Verbose,
+Render: runner.Suite.View,
+UnparsedVariables: currentGlobalVariables,
+FileStates: states,
+Concurrency: runner.Suite.Concurrency,
+DeferralAllowed: runner.Suite.DeferralAllowed,
+Mode: runner.Suite.CommandMode,
+Repair: runner.Suite.Repair,
+})

// Build the graph for the file.
b := graph.TestGraphBuilder{
-Config:         runner.Suite.Config,
-File:           file,
-ContextOpts:    runner.Suite.Opts,
-BackendFactory: runner.Suite.BackendFactory,
-StateManifest:  runner.Suite.Manifest,
-CommandMode:    runner.Suite.CommandMode,
+Config:      runner.Suite.Config,
+File:        file,
+ContextOpts: runner.Suite.Opts,
+CommandMode: runner.Suite.CommandMode,
}
-g, diags := b.Build(runner.EvalContext)
+g, diags := b.Build()
file.Diagnostics = file.Diagnostics.Append(diags)
if walkCancelled := runner.renderPreWalkDiags(file); walkCancelled {
return
}

// walk and execute the graph
-diags = diags.Append(graph.Walk(g, runner.EvalContext))
-
-// Update the manifest file with the reason why each state file was created.
-err := runner.Suite.Manifest.WriteManifest()
-diags = diags.Append(err)
+diags = diags.Append(graph.Walk(g, evalCtx))

+// Save any dangling state files. We'll check all the states we have in
+// memory, and if any are skipped or errored it means we might want to run
+// a cleanup command in the future. This means we need to save the other
+// state files as dependencies in case they are needed during the cleanup.
+
+saveDependencies := false
+for _, state := range states {
+if state.Manifest.Reason == teststates.StateReasonSkip || state.Manifest.Reason == teststates.StateReasonError {
+saveDependencies = true // at least one state file does have resources left over
+break
+}
+}
+if saveDependencies {
+for _, state := range states {
+if state.Manifest.Reason == teststates.StateReasonNone {
+// Any states that have no reason to be saved will be updated
+// to the dependency reason, telling the manifest to save
+// those state files as well.
+state.Manifest.Reason = teststates.StateReasonDep
+}
+}
+}
+diags = diags.Append(runner.Manifest.SaveStates(file, states))
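The dependency-promotion rule above is small enough to sketch in isolation. Here is a runnable approximation, where the StateReason names mirror identifiers that appear in this diff but the helper, its int representation, and the example values are assumptions for illustration only:

package main

import "fmt"

// StateReason mirrors the manifest reasons used in this diff
// (StateReasonNone, StateReasonSkip, StateReasonError, StateReasonDep);
// the concrete int representation is an assumption for this sketch.
type StateReason int

const (
	StateReasonNone StateReason = iota
	StateReasonSkip
	StateReasonError
	StateReasonDep
)

// markDependencies applies the rule implemented above: if any state ended
// the walk skipped or errored, every untouched state is promoted to a
// dependency so it is persisted too and stays available for a later
// cleanup run. The helper itself is hypothetical, not part of the PR.
func markDependencies(reasons []StateReason) {
	leftover := false
	for _, r := range reasons {
		if r == StateReasonSkip || r == StateReasonError {
			leftover = true // at least one state still holds resources
			break
		}
	}
	if !leftover {
		return
	}
	for i, r := range reasons {
		if r == StateReasonNone {
			reasons[i] = StateReasonDep
		}
	}
}

func main() {
	reasons := []StateReason{StateReasonNone, StateReasonSkip, StateReasonNone}
	markDependencies(reasons)
	fmt.Println(reasons) // [3 1 3]: both None states were promoted to Dep
}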

// If the graph walk was terminated, we don't want to add the diagnostics.
// The error the user receives will just be:
// Failure! 0 passed, 1 failed.
// exit status 1
-if runner.EvalContext.Cancelled() {
+if evalCtx.Cancelled() {
file.UpdateStatus(moduletest.Error)
log.Printf("[TRACE] TestFileRunner: graph walk terminated for %s", file.Name)
return