databricks-cli/bundle/deploy/terraform/tfdyn/convert_job.go

package tfdyn

import (
	"context"
	"fmt"
	"sort"

	"github.com/databricks/cli/bundle/internal/tf/schema"
	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/convert"
	"github.com/databricks/cli/libs/log"
	"github.com/databricks/databricks-sdk-go/service/jobs"
)
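
// convertJobResource normalizes a bundle job definition against the job
// settings schema and converts it into the shape expected by the
// databricks_job Terraform resource.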
func convertJobResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) {
	// Normalize the input value to the underlying job schema.
	// This removes superfluous keys and adapts the input to the expected schema.
	vin, diags := convert.Normalize(jobs.JobSettings{}, vin)
	for _, diag := range diags {
		log.Debugf(ctx, "job normalization diagnostic: %s", diag.Summary)
	}

	// Sort the tasks of each job in the bundle by task key. Sorting
	// the task keys ensures that the diff computed by terraform is correct and avoids
	// recreates. For more details see the NOTE at
	// https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/job#example-usage
	// and https://github.com/databricks/terraform-provider-databricks/issues/4011
	tasks := vin.Get("tasks").MustSequence()
	sort.Slice(tasks, func(i, j int) bool {
		return tasks[i].Get("task_key").MustString() < tasks[j].Get("task_key").MustString()
	})
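
	// Write the sorted task sequence back into the value.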
	vout, err := dyn.Map(vin, "tasks", func(_ dyn.Path, _ dyn.Value) (dyn.Value, error) {
		return dyn.V(tasks), nil
	})
	if err != nil {
		return dyn.InvalidValue, err
	}

	// Modify top-level keys.
	vout, err = renameKeys(vout, map[string]string{
		"tasks":        "task",
		"job_clusters": "job_cluster",
		"parameters":   "parameter",
		"environments": "environment",
	})
	if err != nil {
		return dyn.InvalidValue, err
	}
	// Modify keys in the "git_source" block.
	vout, err = dyn.Map(vout, "git_source", func(_ dyn.Path, v dyn.Value) (dyn.Value, error) {
		return renameKeys(v, map[string]string{
			"git_branch":   "branch",
			"git_commit":   "commit",
			"git_provider": "provider",
			"git_tag":      "tag",
			"git_url":      "url",
		})
	})
	if err != nil {
		return dyn.InvalidValue, err
	}
	// Modify keys in the "task" blocks.
	vout, err = dyn.Map(vout, "task", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) {
		// Modify "library" blocks for the nested "for_each_task" task.
		// Note the ":=" here: it declares locals that shadow the outer
		// vout/err instead of assigning to them from inside the closure.
		vout, err := dyn.Map(v, "for_each_task.task", func(_ dyn.Path, v dyn.Value) (dyn.Value, error) {
			return renameKeys(v, map[string]string{
				"libraries": "library",
			})
		})
		if err != nil {
			return dyn.InvalidValue, err
		}
		return renameKeys(vout, map[string]string{
			"libraries": "library",
		})
	}))
	if err != nil {
		return dyn.InvalidValue, err
	}
	// Normalize the output value to the target schema.
	vout, diags = convert.Normalize(schema.ResourceJob{}, vout)
	for _, diag := range diags {
		log.Debugf(ctx, "job normalization diagnostic: %s", diag.Summary)
	}

	return vout, err
}
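
// jobConverter converts a bundle job resource, and its associated
// permissions, into entries in the Terraform resources output.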
type jobConverter struct{}

func (jobConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error {
	vout, err := convertJobResource(ctx, vin)
	if err != nil {
		return err
	}

	// Add the converted resource to the output.
	out.Job[key] = vout.AsAny()

	// Configure permissions for this resource.
	if permissions := convertPermissionsResource(ctx, vin); permissions != nil {
		permissions.JobId = fmt.Sprintf("${databricks_job.%s.id}", key)
		out.Permissions["job_"+key] = permissions
	}

	return nil
}
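
// Register the converter under the "jobs" key, matching the resource
// group name used in bundle configuration.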
func init() {
	registerConverter("jobs", jobConverter{})
}