Fix lint errors for unused functions (#36)

The functionality under terraform/ isn't used anywhere at the moment and
the test doesn't pass for me. It will be useful down the line so
commenting out instead of removing.

Confirmed that staticcheck passes when run with:
```
staticcheck -checks U1000 ./...
```
This commit is contained in:
Pieter Noordhuis 2022-09-07 14:26:31 +02:00 committed by GitHub
parent 67b2e4206f
commit 31a841ff33
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 179 additions and 207 deletions

View File

@@ -38,176 +38,157 @@ Let's see how far we can get without GRPC magic.
*/
package terraform
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"log"
"os"
"github.com/databricks/bricks/project"
"github.com/databricks/bricks/utilities"
"github.com/hashicorp/go-version"
"github.com/hashicorp/hc-install/product"
"github.com/hashicorp/hc-install/releases"
"github.com/hashicorp/terraform-exec/tfexec"
tfjson "github.com/hashicorp/terraform-json"
)
const DeploymentStateRemoteLocation = "dbfs:/FileStore/deployment-state"
type TerraformDeployer struct {
WorkDir string
CopyTfs bool
// type TerraformDeployer struct {
// WorkDir string
// CopyTfs bool
tf *tfexec.Terraform
}
// tf *tfexec.Terraform
// }
func (d *TerraformDeployer) Init(ctx context.Context) error {
if d.CopyTfs {
panic("copying tf configuration files to a temporary dir not yet implemented")
}
// TODO: most likely merge the methods
exec, err := newTerraform(ctx, d.WorkDir, map[string]string{})
if err != nil {
return err
}
d.tf = exec
return nil
}
// func (d *TerraformDeployer) Init(ctx context.Context) error {
// if d.CopyTfs {
// panic("copying tf configuration files to a temporary dir not yet implemented")
// }
// // TODO: most likely merge the methods
// exec, err := newTerraform(ctx, d.WorkDir, map[string]string{})
// if err != nil {
// return err
// }
// d.tf = exec
// return nil
// }
// returns location of terraform state on DBFS based on project's deployment isolation level.
func (d *TerraformDeployer) remoteTfstateLoc() string {
prefix := project.Current.DeploymentIsolationPrefix()
return fmt.Sprintf("%s/%s/terraform.tfstate", DeploymentStateRemoteLocation, prefix)
}
// // returns location of terraform state on DBFS based on project's deployment isolation level.
// func (d *TerraformDeployer) remoteTfstateLoc() string {
// prefix := project.Current.DeploymentIsolationPrefix()
// return fmt.Sprintf("%s/%s/terraform.tfstate", DeploymentStateRemoteLocation, prefix)
// }
// returns structured representation of terraform state on DBFS.
func (d *TerraformDeployer) remoteState(ctx context.Context) (*tfjson.State, int, error) {
raw, err := utilities.ReadDbfsFile(ctx,
project.Current.WorkspacesClient(),
d.remoteTfstateLoc(),
)
if err != nil {
return nil, 0, err
}
return d.tfstateFromReader(bytes.NewBuffer(raw))
}
// // returns structured representation of terraform state on DBFS.
// func (d *TerraformDeployer) remoteState(ctx context.Context) (*tfjson.State, int, error) {
// raw, err := utilities.ReadDbfsFile(ctx,
// project.Current.WorkspacesClient(),
// d.remoteTfstateLoc(),
// )
// if err != nil {
// return nil, 0, err
// }
// return d.tfstateFromReader(bytes.NewBuffer(raw))
// }
// opens file handle for local-backend terraform state, that has to be closed in the calling
// methods. this file alone is not the authoritative state of deployment and has to properly
// be synced with remote counterpart.
func (d *TerraformDeployer) openLocalState() (*os.File, error) {
return os.Open(fmt.Sprintf("%s/terraform.tfstate", d.WorkDir))
}
// // opens file handle for local-backend terraform state, that has to be closed in the calling
// // methods. this file alone is not the authoritative state of deployment and has to properly
// // be synced with remote counterpart.
// func (d *TerraformDeployer) openLocalState() (*os.File, error) {
// return os.Open(fmt.Sprintf("%s/terraform.tfstate", d.WorkDir))
// }
// returns structured representation of terraform state on local machine. as part of
// the optimistic concurrency control, please make sure to always compare the serial
// number of local and remote states before proceeding with deployment.
func (d *TerraformDeployer) localState() (*tfjson.State, int, error) {
local, err := d.openLocalState()
if err != nil {
return nil, 0, err
}
defer local.Close()
return d.tfstateFromReader(local)
}
// // returns structured representation of terraform state on local machine. as part of
// // the optimistic concurrency control, please make sure to always compare the serial
// // number of local and remote states before proceeding with deployment.
// func (d *TerraformDeployer) localState() (*tfjson.State, int, error) {
// local, err := d.openLocalState()
// if err != nil {
// return nil, 0, err
// }
// defer local.Close()
// return d.tfstateFromReader(local)
// }
// converts input stream into structured representation of terraform state and deployment
// serial number, that helps controlling versioning and synchronisation via optimistic locking.
func (d *TerraformDeployer) tfstateFromReader(reader io.Reader) (*tfjson.State, int, error) {
var state tfjson.State
state.UseJSONNumber(true)
decoder := json.NewDecoder(reader)
decoder.UseNumber()
err := decoder.Decode(&state)
if err != nil {
return nil, 0, err
}
err = state.Validate()
if err != nil {
return nil, 0, err
}
var serialWrapper struct {
Serial int `json:"serial,omitempty"`
}
// TODO: use byte buffer if this decoder fails on double reading
err = decoder.Decode(&serialWrapper)
if err != nil {
return nil, 0, err
}
return &state, serialWrapper.Serial, nil
}
// // converts input stream into structured representation of terraform state and deployment
// // serial number, that helps controlling versioning and synchronisation via optimistic locking.
// func (d *TerraformDeployer) tfstateFromReader(reader io.Reader) (*tfjson.State, int, error) {
// var state tfjson.State
// state.UseJSONNumber(true)
// decoder := json.NewDecoder(reader)
// decoder.UseNumber()
// err := decoder.Decode(&state)
// if err != nil {
// return nil, 0, err
// }
// err = state.Validate()
// if err != nil {
// return nil, 0, err
// }
// var serialWrapper struct {
// Serial int `json:"serial,omitempty"`
// }
// // TODO: use byte buffer if this decoder fails on double reading
// err = decoder.Decode(&serialWrapper)
// if err != nil {
// return nil, 0, err
// }
// return &state, serialWrapper.Serial, nil
// }
// uploads terraform state from local directory to designated DBFS location.
func (d *TerraformDeployer) uploadTfstate(ctx context.Context) error {
local, err := d.openLocalState()
if err != nil {
return err
}
defer local.Close()
raw, err := io.ReadAll(local)
if err != nil {
return err
}
// TODO: make sure that deployment locks are implemented
return utilities.CreateDbfsFile(ctx,
project.Current.WorkspacesClient(),
d.remoteTfstateLoc(),
raw,
true,
)
}
// // uploads terraform state from local directory to designated DBFS location.
// func (d *TerraformDeployer) uploadTfstate(ctx context.Context) error {
// local, err := d.openLocalState()
// if err != nil {
// return err
// }
// defer local.Close()
// raw, err := io.ReadAll(local)
// if err != nil {
// return err
// }
// // TODO: make sure that deployment locks are implemented
// return utilities.CreateDbfsFile(ctx,
// project.Current.WorkspacesClient(),
// d.remoteTfstateLoc(),
// raw,
// true,
// )
// }
// downloads terraform state from DBFS to local working directory.
func (d *TerraformDeployer) downloadTfstate(ctx context.Context) error {
remote, serialDeployed, err := d.remoteState(ctx)
if err != nil {
return err
}
log.Printf("[DEBUG] remote serial is %d", serialDeployed)
local, err := d.openLocalState()
if err != nil {
return err
}
defer local.Close()
raw, err := json.Marshal(remote)
if err != nil {
return err
}
_, err = io.Copy(local, bytes.NewBuffer(raw))
return err
}
// // downloads terraform state from DBFS to local working directory.
// func (d *TerraformDeployer) downloadTfstate(ctx context.Context) error {
// remote, serialDeployed, err := d.remoteState(ctx)
// if err != nil {
// return err
// }
// log.Printf("[DEBUG] remote serial is %d", serialDeployed)
// local, err := d.openLocalState()
// if err != nil {
// return err
// }
// defer local.Close()
// raw, err := json.Marshal(remote)
// if err != nil {
// return err
// }
// _, err = io.Copy(local, bytes.NewBuffer(raw))
// return err
// }
// installs terraform to a temporary directory (for now)
func installTerraform(ctx context.Context) (string, error) {
// TODO: let configuration and/or environment variable specify
// terraform binary. Or detect if terraform is installed in the $PATH
installer := &releases.ExactVersion{
Product: product.Terraform,
Version: version.Must(version.NewVersion("1.1.0")),
}
return installer.Install(ctx)
}
// // installs terraform to a temporary directory (for now)
// func installTerraform(ctx context.Context) (string, error) {
// // TODO: let configuration and/or environment variable specify
// // terraform binary. Or detect if terraform is installed in the $PATH
// installer := &releases.ExactVersion{
// Product: product.Terraform,
// Version: version.Must(version.NewVersion("1.1.0")),
// }
// return installer.Install(ctx)
// }
func newTerraform(ctx context.Context, workDir string, env map[string]string) (*tfexec.Terraform, error) {
execPath, err := installTerraform(ctx)
if err != nil {
return nil, err
}
// TODO: figure out how to cleanup/skip `.terraform*` files and dirs, not to confuse users
// one of the options: take entire working directory with *.tf files and move them to tmpdir.
// make it optional, of course, otherwise debugging may become super hard.
tf, err := tfexec.NewTerraform(workDir, execPath)
if err != nil {
return nil, err
}
err = tf.SetEnv(env)
if err != nil {
return nil, err
}
return tf, err
}
// func newTerraform(ctx context.Context, workDir string, env map[string]string) (*tfexec.Terraform, error) {
// execPath, err := installTerraform(ctx)
// if err != nil {
// return nil, err
// }
// // TODO: figure out how to cleanup/skip `.terraform*` files and dirs, not to confuse users
// // one of the options: take entire working directory with *.tf files and move them to tmpdir.
// // make it optional, of course, otherwise debugging may become super hard.
// tf, err := tfexec.NewTerraform(workDir, execPath)
// if err != nil {
// return nil, err
// }
// err = tf.SetEnv(env)
// if err != nil {
// return nil, err
// }
// return tf, err
// }

View File

@@ -1,53 +1,44 @@
package terraform
import (
"context"
"path"
"testing"
// func TestSomething(t *testing.T) {
// ctx := context.Background()
// tf, err := newTerraform(ctx, "testdata/simplest", map[string]string{
// "DATABRICKS_HOST": "..",
// "DATABRICKS_TOKEN": "..",
// })
// assert.NoError(t, err)
"github.com/hashicorp/terraform-exec/tfexec"
"github.com/stretchr/testify/assert"
)
// err = tf.Init(ctx)
// assert.NoError(t, err)
func TestSomething(t *testing.T) {
ctx := context.Background()
tf, err := newTerraform(ctx, "testdata/simplest", map[string]string{
"DATABRICKS_HOST": "..",
"DATABRICKS_TOKEN": "..",
})
assert.NoError(t, err)
// planLoc := path.Join(t.TempDir(), "tfplan.zip")
// hasChanges, err := tf.Plan(ctx, tfexec.Out(planLoc))
// assert.True(t, hasChanges)
// assert.NoError(t, err)
err = tf.Init(ctx)
assert.NoError(t, err)
// plan, err := tf.ShowPlanFile(ctx, planLoc)
// assert.NoError(t, err)
// assert.NotNil(t, plan)
planLoc := path.Join(t.TempDir(), "tfplan.zip")
hasChanges, err := tf.Plan(ctx, tfexec.Out(planLoc))
assert.True(t, hasChanges)
assert.NoError(t, err)
plan, err := tf.ShowPlanFile(ctx, planLoc)
assert.NoError(t, err)
assert.NotNil(t, plan)
found := false
for _, r := range plan.Config.RootModule.Resources {
// TODO: add validator to prevent non-Databricks resources in *.tf files, as
// we're not replacing terraform, we're wrapping it up for the average user.
if r.Type != "databricks_job" {
continue
}
// TODO: validate that libraries on jobs defined in *.tf and libraries
// in `install_requires` defined in setup.py are the same. Exist with
// the explanatory error otherwise.
found = true
// resource "databricks_job" "this"
assert.Equal(t, "this", r.Name)
// this is just a PoC to show how to retrieve DBR version from definitions.
// production code should perform rigorous nil checks...
nc := r.Expressions["new_cluster"]
firstBlock := nc.NestedBlocks[0]
ver := firstBlock["spark_version"].ConstantValue.(string)
assert.Equal(t, "10.0.1", ver)
}
assert.True(t, found)
}
// found := false
// for _, r := range plan.Config.RootModule.Resources {
// // TODO: add validator to prevent non-Databricks resources in *.tf files, as
// // we're not replacing terraform, we're wrapping it up for the average user.
// if r.Type != "databricks_job" {
// continue
// }
// // TODO: validate that libraries on jobs defined in *.tf and libraries
// // in `install_requires` defined in setup.py are the same. Exist with
// // the explanatory error otherwise.
// found = true
// // resource "databricks_job" "this"
// assert.Equal(t, "this", r.Name)
// // this is just a PoC to show how to retrieve DBR version from definitions.
// // production code should perform rigorous nil checks...
// nc := r.Expressions["new_cluster"]
// firstBlock := nc.NestedBlocks[0]
// ver := firstBlock["spark_version"].ConstantValue.(string)
// assert.Equal(t, "10.0.1", ver)
// }
// assert.True(t, found)
// }