mirror of https://github.com/databricks/cli.git

Merge branch 'main' into close-stale-issues

commit 68e27f2eed
@@ -8,6 +8,12 @@
     ".codegen/cmds-account.go.tmpl": "cmd/account/cmd.go"
   },
   "toolchain": {
-    "required": ["go"]
+    "required": ["go"],
+    "post_generate": [
+      "go run ./bundle/internal/bundle/schema/main.go ./bundle/schema/docs/bundle_descriptions.json",
+      "echo 'bundle/internal/tf/schema/*.go linguist-generated=true' >> ./.gitattributes",
+      "echo 'go.sum linguist-generated=true' >> ./.gitattributes",
+      "echo 'bundle/schema/docs/bundle_descriptions.json linguist-generated=true' >> ./.gitattributes"
+    ]
   }
 }

@@ -1 +1 @@
-7b57ba3a53f4de3d049b6a24391fe5474212daf8
+63caa3cb0c05045e81d3dcf2451fa990d8670f36

@@ -2,7 +2,15 @@

 package workspace

-{{ $excludes := list "command-execution" "statement-execution" "dbfs" "dbsql-permissions" "account-access-control-proxy" }}
+{{ $excludes :=
+	list
+		"command-execution"
+		"statement-execution"
+		"dbfs"
+		"dbsql-permissions"
+		"account-access-control-proxy"
+		"files"
+}}

 import (
 	"github.com/databricks/cli/cmd/root"

@@ -10,7 +10,15 @@ import (
 	"github.com/spf13/cobra"
 )

-{{ $excludes := list "command-execution" "statement-execution" "dbfs" "dbsql-permissions" "account-access-control-proxy" }}
+{{ $excludes :=
+	list
+		"command-execution"
+		"statement-execution"
+		"dbfs"
+		"dbsql-permissions"
+		"account-access-control-proxy"
+		"files"
+}}

 {{if not (in $excludes .KebabName) }}
 {{template "service" .}}

@@ -18,6 +26,13 @@ import (
 {{skipThisFile}}
 {{end}}

+{{define "printArray" -}}
+{{if le (len .) 5 -}}
+[{{range $index, $element := .}}{{if ne $index 0}}, {{end}}{{$element.Name}}{{end}}]
+{{- else -}}[{{range $index, $element := .}}
+  {{$element.Name}},{{end}}
+]{{end}}{{end}}
+
 {{define "service"}}
 // Slice with functions to override default command behavior.
 // Functions can be added from the `init()` function in manually curated files in this directory.
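Note: the new `printArray` helper renders an enum inline when it has at most five values and one value per line otherwise. A minimal runnable sketch of that behavior using Go's `text/template` (the `enumValue` type is a hypothetical stand-in for the generator's enum entries, not part of this commit):

```go
package main

import (
	"os"
	"text/template"
)

// Hypothetical stand-in for the generator's enum entries; only Name is used.
type enumValue struct{ Name string }

const demo = `{{define "printArray" -}}
{{if le (len .) 5 -}}
[{{range $index, $element := .}}{{if ne $index 0}}, {{end}}{{$element.Name}}{{end}}]
{{- else -}}[{{range $index, $element := .}}
  {{$element.Name}},{{end}}
]{{end}}{{end}}{{template "printArray" .}}`

func main() {
	t := template.Must(template.New("demo").Parse(demo))
	// Five or fewer values render inline: [ACTIVE, TRASHED]
	_ = t.Execute(os.Stdout, []enumValue{{"ACTIVE"}, {"TRASHED"}})
	os.Stdout.WriteString("\n")
	// More than five values render one per line inside brackets.
	_ = t.Execute(os.Stdout, []enumValue{{"A"}, {"B"}, {"C"}, {"D"}, {"E"}, {"F"}})
}
```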

@@ -70,13 +85,10 @@ var {{.CamelName}}Overrides []func(
 func new{{.PascalName}}() *cobra.Command {
 	cmd := &cobra.Command{}

-	{{- $useJsonForAllFields := or .IsJsonOnly (and .Request (or (not .Request.IsAllRequiredFieldsPrimitive) .Request.HasRequiredNonBodyField)) -}}
-	{{- $needJsonFlag := or $useJsonForAllFields (and .Request (not .Request.IsOnlyPrimitiveFields)) -}}

 	{{- if .Request}}

 	var {{.CamelName}}Req {{.Service.Package.Name}}.{{.Request.PascalName}}
-	{{- if $needJsonFlag}}
+	{{- if .CanUseJson}}
 	var {{.CamelName}}Json flags.JsonFlag
 	{{- end}}
 	{{- end}}

@@ -89,7 +101,7 @@ func new{{.PascalName}}() *cobra.Command {
 	cmd.Flags().DurationVar(&{{.CamelName}}Timeout, "timeout", {{.Wait.Timeout}}*time.Minute, `maximum amount of time to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`)
 	{{end -}}
 	{{if .Request}}// TODO: short flags
-	{{- if $needJsonFlag}}
+	{{- if .CanUseJson}}
 	cmd.Flags().Var(&{{.CamelName}}Json, "json", `either inline JSON string or @path/to/file.json with request body`)
 	{{- end}}
 	{{$method := .}}

@@ -101,7 +113,7 @@ func new{{.PascalName}}() *cobra.Command {
 	{{else if .Entity.ArrayValue }}// TODO: array: {{.Name}}
 	{{else if .Entity.MapValue }}// TODO: map via StringToStringVar: {{.Name}}
 	{{else if .Entity.IsEmpty }}// TODO: output-only field
-	{{else if .Entity.Enum }}cmd.Flags().Var(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", `{{.Summary | without "`"}}`)
+	{{else if .Entity.Enum }}cmd.Flags().Var(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", `{{.Summary | without "`" | trimSuffix "."}}. Supported values: {{template "printArray" .Entity.Enum}}`)
 	{{else}}cmd.Flags().{{template "arg-type" .Entity}}(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", {{$method.CamelName}}Req.{{.PascalName}}, `{{.Summary | without "`"}}`)
 	{{end}}
 	{{- end -}}

@@ -113,17 +125,36 @@ func new{{.PascalName}}() *cobra.Command {
 	{{- $fullCommandName := (print $serviceName " " .KebabName) -}}
 	{{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}

-	{{- $hasPosArgs := and .Request (or .Request.IsAllRequiredFieldsPrimitive (eq .PascalName "RunNow")) -}}
+	{{- $hasPosArgs := and (not .MustUseJson) (and .Request (or .Request.IsAllRequiredFieldsPrimitive (eq .PascalName "RunNow"))) -}}
 	{{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len .Request.RequiredFields)) -}}
 	{{- $serviceHasNamedIdMap := and (and .Service.List .Service.List.NamedIdMap) (not (eq .PascalName "List")) -}}
 	{{- $hasIdPrompt := and (not $noPrompt) (and $hasSinglePosArg $serviceHasNamedIdMap) -}}
 	{{- $wait := and .Wait (and (not .IsCrudRead) (not (eq .SnakeName "get_run"))) -}}
 	{{- $hasRequiredArgs := and (not $hasIdPrompt) $hasPosArgs -}}
+	{{- $hasSingleRequiredRequestBodyFieldWithPrompt := and (and $hasIdPrompt .Request) (eq 1 (len .Request.RequiredRequestBodyFields)) -}}
+
+	{{- $atleastOneArgumentWithDescription := false -}}
+	{{- if $hasPosArgs -}}
+		{{- range .Request.RequiredFields -}}
+			{{- if .HasComment -}}
+				{{- $atleastOneArgumentWithDescription = true -}}
+				{{- break -}}
+			{{- end -}}
+		{{- end -}}
+	{{- end -}}

 	cmd.Use = "{{.KebabName}}{{if $hasPosArgs}}{{range .Request.RequiredFields}} {{.ConstantName}}{{end}}{{end}}"
 	{{- if .Description }}
 	cmd.Short = `{{.Summary | without "`"}}`
-	cmd.Long = `{{.Comment " " 80 | without "`"}}`
+	cmd.Long = `{{.Comment " " 80 | without "`"}}
+	{{- if $atleastOneArgumentWithDescription }}
+
+  Arguments:
+	{{- range .Request.RequiredFields }}
+    {{ .ConstantName }}: {{.Comment " " 80 | without "`"}}
+	{{- end -}}
+	{{- end -}}
+	`
 	{{- end }}
 	{{- if .IsPrivatePreview }}

@@ -134,12 +165,20 @@ func new{{.PascalName}}() *cobra.Command {
 	cmd.Annotations = make(map[string]string)
 	{{if $hasRequiredArgs }}
 	cmd.Args = func(cmd *cobra.Command, args []string) error {
-		check := cobra.ExactArgs({{len .Request.RequiredFields}})
-		{{- if $useJsonForAllFields }}
+		{{- if and .CanUseJson .Request.HasRequiredRequestBodyFields }}
 		if cmd.Flags().Changed("json") {
-			check = cobra.ExactArgs(0)
+			err := cobra.ExactArgs({{len .Request.RequiredPathFields}})(cmd, args)
+			if err != nil {
+				{{- if eq 0 (len .Request.RequiredPathFields) }}
+				return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide{{- range $index, $field := .Request.RequiredFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input")
+				{{- else }}
+				return fmt.Errorf("when --json flag is specified, provide only{{- range $index, $field := .Request.RequiredPathFields}}{{if $index}},{{end}} {{$field.ConstantName}}{{end}} as positional arguments. Provide{{- range $index, $field := .Request.RequiredRequestBodyFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input")
+				{{- end }}
+			}
+			return nil
 		}
 		{{- end }}
+		check := cobra.ExactArgs({{len .Request.RequiredFields}})
 		return check(cmd, args)
 	}
 	{{end}}
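Note: the net effect of this hunk is that passing `--json` relaxes the positional-argument check to the path-bound arguments only, with an error that names the fields to move into the JSON payload. A runnable sketch of the validator shape the template now emits, for a hypothetical `update NAME` command with one required path argument and one required body field `spec` (names are illustrative, not verbatim generated output):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{Use: "update NAME"}
	var payload string
	cmd.Flags().StringVar(&payload, "json", "", "either inline JSON string or @path/to/file.json with request body")

	cmd.Args = func(cmd *cobra.Command, args []string) error {
		if cmd.Flags().Changed("json") {
			// With --json, only the path argument NAME may remain positional.
			if err := cobra.ExactArgs(1)(cmd, args); err != nil {
				return fmt.Errorf("when --json flag is specified, provide only NAME as positional arguments. Provide 'spec' in your JSON input")
			}
			return nil
		}
		// Without --json, every required field is positional.
		return cobra.ExactArgs(2)(cmd, args)
	}
	cmd.RunE = func(cmd *cobra.Command, args []string) error { return nil }
	_ = cmd.Execute()
}
```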

@@ -148,13 +187,17 @@ func new{{.PascalName}}() *cobra.Command {
 		ctx := cmd.Context()
 		{{if .Service.IsAccounts}}a := root.AccountClient(ctx){{else}}w := root.WorkspaceClient(ctx){{end}}
 		{{- if .Request }}
-		{{ if $needJsonFlag }}
+		{{ if .CanUseJson }}
 		if cmd.Flags().Changed("json") {
 			err = {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req)
 			if err != nil {
 				return err
 			}
-		}{{end}}{{if $useJsonForAllFields }} else {
+		}{{end}}{{ if .MustUseJson }}else {
+			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+		}{{- end}}
+		{{- if (not .MustUseJson) }}
+		{{- if and .CanUseJson $hasSingleRequiredRequestBodyFieldWithPrompt }} else {
 		{{- end}}
 		{{- if $hasIdPrompt}}
 		if len(args) == 0 {

@@ -175,22 +218,30 @@ func new{{.PascalName}}() *cobra.Command {
 			return fmt.Errorf("expected to have {{range .Request.RequiredFields}}{{.Summary | trimSuffix "." | lower}}{{end}}")
 		}
 		{{- end -}}

 		{{$method := .}}
-		{{- if and .Request.IsAllRequiredFieldsPrimitive (not .IsJsonOnly) -}}
 		{{- range $arg, $field := .Request.RequiredFields}}
+		{{- $optionalIfJsonIsUsed := and (not $hasIdPrompt) (and $field.IsRequestBodyField $method.CanUseJson) }}
+		{{- if $optionalIfJsonIsUsed }}
+		if !cmd.Flags().Changed("json") {
+		{{- end }}
 		{{if not $field.Entity.IsString -}}
 		_, err = fmt.Sscan(args[{{$arg}}], &{{$method.CamelName}}Req.{{$field.PascalName}})
 		if err != nil {
 			return fmt.Errorf("invalid {{$field.ConstantName}}: %s", args[{{$arg}}])
 		}{{else -}}
 		{{$method.CamelName}}Req.{{$field.PascalName}} = args[{{$arg}}]
-		{{- end -}}{{end}}
-		{{- else -}}
-		return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
 		{{- end -}}
-		{{if $useJsonForAllFields }}
+		{{- if $optionalIfJsonIsUsed }}
 		}
-		{{end }}
+		{{- end }}
+		{{- end -}}

+		{{- if and .CanUseJson $hasSingleRequiredRequestBodyFieldWithPrompt }}
+		}
+		{{- end}}

+		{{- end}}
 		{{end}}
 		{{if $wait -}}
 		wait, err := {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{.Service.PascalName}}.{{.PascalName}}(ctx{{if .Request}}, {{.CamelName}}Req{{end}})
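Note: the `fmt.Sscan` branch above is how generated commands coerce a positional argument into a non-string request field; the new `$optionalIfJsonIsUsed` wrapper simply skips that parsing when the value will come from `--json` instead. A self-contained sketch of the parsing step (the `JOB_ID` name is illustrative):

```go
package main

import "fmt"

func main() {
	args := []string{"1234"}
	var jobId int64
	// Mirrors the generated parsing of a non-string positional argument.
	if _, err := fmt.Sscan(args[0], &jobId); err != nil {
		fmt.Printf("invalid JOB_ID: %s\n", args[0])
		return
	}
	fmt.Println(jobId) // 1234
}
```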

@@ -261,8 +312,13 @@ func init() {
 			return err
 		}
 		{{ if .Response -}}
-		return cmdio.Render(ctx, response)
+		{{- if .IsResponseByteStream -}}
+		defer response.{{.ResponseBodyField.PascalName}}.Close()
+		return cmdio.RenderReader(ctx, response.{{.ResponseBodyField.PascalName}})
 		{{- else -}}
+		return cmdio.Render(ctx, response)
+		{{- end -}}
+		{{ else -}}
 		return nil
 		{{- end -}}
 		{{- end -}}
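Note: the byte-stream branch exists because some responses (file downloads, for example) carry an `io.ReadCloser` body that must be streamed and closed rather than marshalled to JSON. A minimal sketch of the pattern, assuming a plain copy to stdout where the template uses `cmdio.RenderReader`:

```go
package main

import (
	"io"
	"os"
	"strings"
)

// renderReader streams a response body to stdout and closes it, mirroring
// the defer-Close plus RenderReader pairing in the generated code.
func renderReader(body io.ReadCloser) error {
	defer body.Close()
	_, err := io.Copy(os.Stdout, body)
	return err
}

func main() {
	_ = renderReader(io.NopCloser(strings.NewReader("raw file contents\n")))
}
```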

@@ -10,9 +10,9 @@ cmd/account/ip-access-lists/ip-access-lists.go linguist-generated=true
 cmd/account/log-delivery/log-delivery.go linguist-generated=true
 cmd/account/metastore-assignments/metastore-assignments.go linguist-generated=true
 cmd/account/metastores/metastores.go linguist-generated=true
-cmd/account/network-policy/network-policy.go linguist-generated=true
+cmd/account/network-connectivity/network-connectivity.go linguist-generated=true
 cmd/account/networks/networks.go linguist-generated=true
-cmd/account/o-auth-enrollment/o-auth-enrollment.go linguist-generated=true
+cmd/account/o-auth-published-apps/o-auth-published-apps.go linguist-generated=true
 cmd/account/private-access/private-access.go linguist-generated=true
 cmd/account/published-app-integration/published-app-integration.go linguist-generated=true
 cmd/account/service-principal-secrets/service-principal-secrets.go linguist-generated=true

@@ -25,13 +25,17 @@ cmd/account/vpc-endpoints/vpc-endpoints.go linguist-generated=true
 cmd/account/workspace-assignment/workspace-assignment.go linguist-generated=true
 cmd/account/workspaces/workspaces.go linguist-generated=true
 cmd/workspace/alerts/alerts.go linguist-generated=true
+cmd/workspace/apps/apps.go linguist-generated=true
+cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true
 cmd/workspace/catalogs/catalogs.go linguist-generated=true
 cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true
 cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true
 cmd/workspace/clusters/clusters.go linguist-generated=true
 cmd/workspace/cmd.go linguist-generated=true
 cmd/workspace/connections/connections.go linguist-generated=true
+cmd/workspace/credentials-manager/credentials-manager.go linguist-generated=true
 cmd/workspace/current-user/current-user.go linguist-generated=true
+cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true
 cmd/workspace/dashboards/dashboards.go linguist-generated=true
 cmd/workspace/data-sources/data-sources.go linguist-generated=true
 cmd/workspace/experiments/experiments.go linguist-generated=true

@@ -48,19 +52,23 @@ cmd/workspace/jobs/jobs.go linguist-generated=true
 cmd/workspace/libraries/libraries.go linguist-generated=true
 cmd/workspace/metastores/metastores.go linguist-generated=true
 cmd/workspace/model-registry/model-registry.go linguist-generated=true
+cmd/workspace/model-versions/model-versions.go linguist-generated=true
 cmd/workspace/permissions/permissions.go linguist-generated=true
 cmd/workspace/pipelines/pipelines.go linguist-generated=true
 cmd/workspace/policy-families/policy-families.go linguist-generated=true
 cmd/workspace/providers/providers.go linguist-generated=true
 cmd/workspace/queries/queries.go linguist-generated=true
 cmd/workspace/query-history/query-history.go linguist-generated=true
+cmd/workspace/query-visualizations/query-visualizations.go linguist-generated=true
 cmd/workspace/recipient-activation/recipient-activation.go linguist-generated=true
 cmd/workspace/recipients/recipients.go linguist-generated=true
+cmd/workspace/registered-models/registered-models.go linguist-generated=true
 cmd/workspace/repos/repos.go linguist-generated=true
 cmd/workspace/schemas/schemas.go linguist-generated=true
 cmd/workspace/secrets/secrets.go linguist-generated=true
 cmd/workspace/service-principals/service-principals.go linguist-generated=true
 cmd/workspace/serving-endpoints/serving-endpoints.go linguist-generated=true
+cmd/workspace/settings/settings.go linguist-generated=true
 cmd/workspace/shares/shares.go linguist-generated=true
 cmd/workspace/storage-credentials/storage-credentials.go linguist-generated=true
 cmd/workspace/system-schemas/system-schemas.go linguist-generated=true

@@ -74,3 +82,6 @@ cmd/workspace/warehouses/warehouses.go linguist-generated=true
 cmd/workspace/workspace-bindings/workspace-bindings.go linguist-generated=true
 cmd/workspace/workspace-conf/workspace-conf.go linguist-generated=true
 cmd/workspace/workspace/workspace.go linguist-generated=true
+bundle/internal/tf/schema/*.go linguist-generated=true
+go.sum linguist-generated=true
+bundle/schema/docs/bundle_descriptions.json linguist-generated=true
@@ -0,0 +1,29 @@
+---
+name: Bug report for the CLI
+about: Use this to report an issue with the CLI.
+title: ''
+labels: CLI
+---
+
+### Describe the issue
+A clear and concise description of what the issue is
+
+### Steps to reproduce the behavior
+Please list the steps required to reproduce the issue, for example:
+1. Run `databricks clusters ...`
+2. See error
+
+### Expected Behavior
+Clear and concise description of what should have happened
+
+### Actual Behavior
+Clear and concise description of what actually happened
+
+### OS and CLI version
+Please include the version of the CLI (e.g. v0.1.2) and the operating system (e.g. windows). You can run databricks --version to get the version of your Databricks CLI
+
+### Is this a regression?
+Did this work in a previous version of the CLI? If so, which versions did you try?
+
+### Debug Logs
+Output logs if you run the command with debug logs enabled. Example: databricks clusters list --log-level=debug. Redact if needed

@@ -0,0 +1 @@
+blank_issues_enabled: true

@@ -0,0 +1,33 @@
+---
+name: Bug report for Databricks Asset Bundles
+about: Use this to report an issue with Databricks Asset Bundles.
+labels: DABs
+title: ''
+---
+
+### Describe the issue
+A clear and concise description of what the issue is
+
+### Configuration
+Please provide a minimal reproducible configuration for the issue
+
+### Steps to reproduce the behavior
+Please list the steps required to reproduce the issue, for example:
+1. Run `databricks bundle deploy ...`
+2. Run `databricks bundle run ...`
+3. See error
+
+### Expected Behavior
+Clear and concise description of what should have happened
+
+### Actual Behavior
+Clear and concise description of what actually happened
+
+### OS and CLI version
+Please provide the version of the CLI (e.g. v0.1.2) and the operating system (e.g. windows). You can run databricks --version to get the version of your Databricks CLI
+
+### Is this a regression?
+Did this work in a previous version of the CLI? If so, which versions did you try?
+
+### Debug Logs
+Output logs if you run the command with debug logs enabled. Example: databricks bundle deploy --log-level=debug. Redact if needed

@@ -5,6 +5,14 @@ on:
     types: [opened, synchronize]
   merge_group:
     types: [checks_requested]
+  push:
+    # Always run on push to main. The build cache can only be reused
+    # if it was saved by a run from the repository's default branch.
+    # The run result will be identical to that from the merge queue
+    # because the commit is identical, yet we need to perform it to
+    # seed the build cache.
+    branches:
+      - main

 jobs:
   tests:

@@ -20,16 +28,17 @@ jobs:

     steps:
       - name: Checkout repository and submodules
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

-      - name: Unshallow
-        run: git fetch --prune --unshallow
-
       - name: Setup Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
-          go-version: 1.19.5
-          cache: true
+          go-version: 1.21.0
+
+      - name: Setup Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.9'

       - name: Set go env
         run: |

@@ -51,23 +60,23 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Setup Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
-          # Use 1.19 because of godoc formatting.
-          # See https://tip.golang.org/doc/go1.19#go-doc.
-          go-version: 1.19
+          go-version: 1.21.0

           # No need to download cached dependencies when running gofmt.
           cache: false

-      - name: Run gofmt
+      - name: Install goimports
         run: |
-          # -l: list files that were reformatted
-          # -w: write back formatted files to disk
-          gofmt -l -w ./
+          go install golang.org/x/tools/cmd/goimports@latest
+
+      - name: Run make fmt
+        run: |
+          make fmt

       - name: Run go mod tidy
         run: |

@@ -13,32 +13,15 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository and submodules
-        uses: actions/checkout@v3
-
-      - name: Unshallow
-        run: git fetch --prune --unshallow
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          fetch-tags: true

       - name: Setup Go
-        id: go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
-          go-version: 1.19.5
-
-      - name: Locate cache paths
-        id: cache
-        run: |
-          echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
-          echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_OUTPUT
-
-      # Note: use custom caching because below performs a cross platform build
-      # through goreleaser and don't want to share a cache with the test builds.
-      - name: Setup caching
-        uses: actions/cache@v3
-        with:
-          path: |
-            ${{ steps.cache.outputs.GOMODCACHE }}
-            ${{ steps.cache.outputs.GOCACHE }}
-          key: release-${{ runner.os }}-go-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', '.goreleaser.yaml') }}
+          go-version: 1.21.0

       - name: Hide snapshot tag to outsmart GoReleaser
         run: git tag -d snapshot || true

@@ -73,7 +56,7 @@ jobs:
       - name: Update snapshot tag

         # Snapshot release may only be updated for commits to the main branch.
-        # if: github.ref == 'refs/heads/main'
+        if: github.ref == 'refs/heads/main'

         run: |
           git tag snapshot

@@ -82,7 +65,7 @@ jobs:
      - name: Update snapshot release

         # Snapshot release may only be updated for commits to the main branch.
-        # if: github.ref == 'refs/heads/main'
+        if: github.ref == 'refs/heads/main'

         uses: softprops/action-gh-release@v1
         with:

@@ -9,40 +9,111 @@ on:

 jobs:
   goreleaser:
+    outputs:
+      artifacts: ${{ steps.releaser.outputs.artifacts }}
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository and submodules
-        uses: actions/checkout@v3
-
-      - name: Unshallow
-        run: git fetch --prune --unshallow
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          fetch-tags: true

       - name: Setup Go
-        id: go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
-          go-version: 1.19.5
-
-      - name: Locate cache paths
-        id: cache
-        run: |
-          echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
-          echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_OUTPUT
-
-      # Note: use custom caching because below performs a cross platform build
-      # through goreleaser and don't want to share a cache with the test builds.
-      - name: Setup caching
-        uses: actions/cache@v3
-        with:
-          path: |
-            ${{ steps.cache.outputs.GOMODCACHE }}
-            ${{ steps.cache.outputs.GOCACHE }}
-          key: release-${{ runner.os }}-go-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', '.goreleaser.yaml') }}
+          go-version: 1.21.0

       - name: Run GoReleaser
+        id: releaser
         uses: goreleaser/goreleaser-action@v4
         with:
           version: latest
           args: release
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+  create-setup-cli-release-pr:
+    needs: goreleaser
+    runs-on: ubuntu-latest
+    steps:
+      - name: Set VERSION variable from tag
+        run: |
+          VERSION=${{ github.ref_name }}
+          echo "VERSION=${VERSION:1}" >> $GITHUB_ENV
+
+      - name: Update setup-cli
+        uses: actions/github-script@v6
+        with:
+          github-token: ${{ secrets.DECO_GITHUB_TOKEN }}
+          script: |
+            await github.rest.actions.createWorkflowDispatch({
+              owner: 'databricks',
+              repo: 'setup-cli',
+              workflow_id: 'release-pr.yml',
+              ref: 'main',
+              inputs: {
+                version: "${{ env.VERSION }}",
+              }
+            });
+
+  create-homebrew-tap-release-pr:
+    needs: goreleaser
+    runs-on: ubuntu-latest
+    steps:
+      - name: Set VERSION variable from tag
+        run: |
+          VERSION=${{ github.ref_name }}
+          echo "VERSION=${VERSION:1}" >> $GITHUB_ENV
+
+      - name: Update homebrew-tap
+        uses: actions/github-script@v6
+        with:
+          github-token: ${{ secrets.DECO_GITHUB_TOKEN }}
+          script: |
+            let artifacts = JSON.parse('${{ needs.goreleaser.outputs.artifacts }}')
+            artifacts = artifacts.filter(a => a.type == "Archive")
+            artifacts = new Map(
+              artifacts.map(a => [
+                a.goos + "_" + a.goarch,
+                a.extra.Checksum.replace("sha256:", "")
+              ])
+            )
+
+            await github.rest.actions.createWorkflowDispatch({
+              owner: 'databricks',
+              repo: 'homebrew-tap',
+              workflow_id: 'release-pr.yml',
+              ref: 'main',
+              inputs: {
+                version: "${{ env.VERSION }}",
+                darwin_amd64_sha: artifacts.get('darwin_amd64'),
+                darwin_arm64_sha: artifacts.get('darwin_arm64'),
+                linux_amd64_sha: artifacts.get('linux_amd64'),
+                linux_arm64_sha: artifacts.get('linux_arm64')
+              }
+            });
+
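Note: the homebrew-tap step consumes GoReleaser's artifact list (surfaced through the job's `artifacts` output) and maps each archive's `goos_goarch` pair to its SHA-256. A Go sketch of the same transformation; the JSON shape is an assumption inferred from the fields the script reads (`type`, `goos`, `goarch`, `extra.Checksum`):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Assumed subset of the GoReleaser artifacts JSON, inferred from the
// fields the workflow script accesses.
type artifact struct {
	Type   string `json:"type"`
	Goos   string `json:"goos"`
	Goarch string `json:"goarch"`
	Extra  struct {
		Checksum string `json:"Checksum"`
	} `json:"extra"`
}

func main() {
	raw := `[{"type":"Archive","goos":"darwin","goarch":"arm64","extra":{"Checksum":"sha256:abc123"}}]`
	var artifacts []artifact
	if err := json.Unmarshal([]byte(raw), &artifacts); err != nil {
		panic(err)
	}
	shas := map[string]string{}
	for _, a := range artifacts {
		if a.Type == "Archive" {
			shas[a.Goos+"_"+a.Goarch] = strings.TrimPrefix(a.Extra.Checksum, "sha256:")
		}
	}
	fmt.Println(shas["darwin_arm64"]) // abc123
}
```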
+  create-vscode-extension-update-pr:
+    needs: goreleaser
+    runs-on: ubuntu-latest
+    steps:
+      - name: Set VERSION variable from tag
+        run: |
+          VERSION=${{ github.ref_name }}
+          echo "VERSION=${VERSION:1}" >> $GITHUB_ENV
+
+      - name: Update CLI version in the VSCode extension
+        uses: actions/github-script@v6
+        with:
+          github-token: ${{ secrets.DECO_GITHUB_TOKEN }}
+          script: |
+            await github.rest.actions.createWorkflowDispatch({
+              owner: 'databricks',
+              repo: 'databricks-vscode',
+              workflow_id: 'update-cli-version.yml',
+              ref: 'main',
+              inputs: {
+                version: "${{ env.VERSION }}",
+              }
+            });

@@ -28,3 +28,6 @@ __pycache__
 .terraform.lock.hcl

 .vscode/launch.json
+.vscode/tasks.json
+
+.databricks

@@ -35,12 +35,6 @@ builds:
   goarch:
     - amd64
     - arm64
-    - '386'
-  ignore:
-    - goos: darwin
-      goarch: '386'
-    - goos: linux
-      goarch: '386'
   binary: databricks
 archives:
   - format: zip

@@ -0,0 +1,3 @@
+# Typings for Pylance in VS Code
+# see https://github.com/microsoft/pyright/blob/main/docs/builtins.md
+from databricks.sdk.runtime import *

@@ -7,5 +7,8 @@
   "files.insertFinalNewline": true,
   "files.trimFinalNewlines": true,
   "python.envFile": "${workspaceFolder}/.databricks/.databricks.env",
-  "databricks.python.envFile": "${workspaceFolder}/.env"
+  "databricks.python.envFile": "${workspaceFolder}/.env",
+  "python.analysis.stubPath": ".vscode",
+  "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\<codecell\\>|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])",
+  "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------"
 }
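Note: the added cell-marker settings tell the VS Code Jupyter extension which line prefixes delimit Databricks notebook cells. A quick Go check of the first two alternatives of that regex (the remaining alternatives cover classic `# %%`-style markers):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// First two alternatives of the cell-marker regex from settings.json.
	re := regexp.MustCompile(`^# COMMAND ----------|^# Databricks notebook source`)
	fmt.Println(re.MatchString("# COMMAND ----------"))         // true
	fmt.Println(re.MatchString("# Databricks notebook source")) // true
	fmt.Println(re.MatchString("print(1)"))                     // false
}
```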

CHANGELOG.md (+592)
@ -1,5 +1,597 @@
|
||||||
# Version changelog
|
# Version changelog
|
||||||
|
|
||||||
|
## 0.211.0
|
||||||
|
|
||||||
|
CLI:
|
||||||
|
* Upgrade Go SDK to 0.27.0 ([#1064](https://github.com/databricks/cli/pull/1064)).
|
||||||
|
* Skip profile resolution if `DATABRICKS_AUTH_TYPE` is set ([#1068](https://github.com/databricks/cli/pull/1068)).
|
||||||
|
* Do not allow input prompts in Git Bash terminal ([#1069](https://github.com/databricks/cli/pull/1069)).
|
||||||
|
* Added output template for list-secrets command ([#1074](https://github.com/databricks/cli/pull/1074)).
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
* Set metadata fields required to enable break-glass UI for jobs ([#880](https://github.com/databricks/cli/pull/880)).
|
||||||
|
* Do not prompt for template values in Git Bash ([#1082](https://github.com/databricks/cli/pull/1082)).
|
||||||
|
* Tune output of bundle deploy command ([#1047](https://github.com/databricks/cli/pull/1047)).
|
||||||
|
|
||||||
|
API Changes:
|
||||||
|
* Changed `databricks connections update` command with new required argument order.
|
||||||
|
* Changed `databricks serving-endpoints update-config` command with new required argument order.
|
||||||
|
* Added `databricks serving-endpoints put` command.
|
||||||
|
* Removed `databricks account network-policy` command group.
|
||||||
|
|
||||||
|
OpenAPI commit 63caa3cb0c05045e81d3dcf2451fa990d8670f36 (2023-12-12)
|
||||||
|
|
||||||
|
Dependency updates:
|
||||||
|
* Bump github.com/google/uuid from 1.4.0 to 1.5.0 ([#1073](https://github.com/databricks/cli/pull/1073)).
|
||||||
|
* Bump golang.org/x/crypto from 0.16.0 to 0.17.0 ([#1076](https://github.com/databricks/cli/pull/1076)).
|
||||||
|
|
||||||
|
## 0.210.3
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
* Improve default template ([#1046](https://github.com/databricks/cli/pull/1046)).
|
||||||
|
* Fix passthrough of pipeline notifications ([#1058](https://github.com/databricks/cli/pull/1058)).
|
||||||
|
|
||||||
|
Internal:
|
||||||
|
* Stub out Python virtual environment installation for `labs` commands ([#1057](https://github.com/databricks/cli/pull/1057)).
|
||||||
|
* Upgrade Terraform schema version to v1.31.1 ([#1055](https://github.com/databricks/cli/pull/1055)).
|
||||||
|
|
||||||
|
|
||||||
|
Dependency updates:
|
||||||
|
* Bump github.com/hashicorp/hc-install from 0.6.1 to 0.6.2 ([#1054](https://github.com/databricks/cli/pull/1054)).
|
||||||
|
* Bump github.com/databricks/databricks-sdk-go from 0.26.1 to 0.26.2 ([#1053](https://github.com/databricks/cli/pull/1053)).
|
||||||
|
|
||||||
|
## 0.210.2
|
||||||
|
|
||||||
|
CLI:
|
||||||
|
* Add documentation for positional args in commands generated from the Databricks OpenAPI specification ([#1033](https://github.com/databricks/cli/pull/1033)).
|
||||||
|
* Ask for host when .databrickscfg doesn't exist ([#1041](https://github.com/databricks/cli/pull/1041)).
|
||||||
|
* Add list of supported values for flags that represent an enum field ([#1036](https://github.com/databricks/cli/pull/1036)).
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
* Fix panic when bundle auth resolution fails ([#1002](https://github.com/databricks/cli/pull/1002)).
|
||||||
|
* Add versioning for bundle templates ([#972](https://github.com/databricks/cli/pull/972)).
|
||||||
|
* Add support for conditional prompting in bundle init ([#971](https://github.com/databricks/cli/pull/971)).
|
||||||
|
* Pass parameters to task when run with `--python-params` and `python_wheel_wrapper` is true ([#1037](https://github.com/databricks/cli/pull/1037)).
|
||||||
|
* Change default_python template to auto-update version on each wheel build ([#1034](https://github.com/databricks/cli/pull/1034)).
|
||||||
|
|
||||||
|
Internal:
|
||||||
|
* Rewrite the friendly log handler ([#1038](https://github.com/databricks/cli/pull/1038)).
|
||||||
|
* Move bundle schema update to an internal module ([#1012](https://github.com/databricks/cli/pull/1012)).
|
||||||
|
|
||||||
|
|
||||||
|
Dependency updates:
|
||||||
|
* Bump github.com/databricks/databricks-sdk-go from 0.26.0 to 0.26.1 ([#1040](https://github.com/databricks/cli/pull/1040)).
|
||||||
|
|
||||||
|
## 0.210.1
|
||||||
|
|
||||||
|
This is a bugfix release to address issues with v0.210.0.
|
||||||
|
|
||||||
|
CLI:
|
||||||
|
* Fix `panic: is not set` ([#1027](https://github.com/databricks/cli/pull/1027)).
|
||||||
|
* Fix `databricks configure` if new profile is specified ([#1030](https://github.com/databricks/cli/pull/1030)).
|
||||||
|
* Filter out system clusters for `--configure-cluster` ([#1031](https://github.com/databricks/cli/pull/1031)).
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
* Fixed panic when job has trigger and in development mode ([#1026](https://github.com/databricks/cli/pull/1026)).
|
||||||
|
|
||||||
|
Internal:
|
||||||
|
* Use `fetch-tags` option in release workflows ([#1025](https://github.com/databricks/cli/pull/1025)).
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## 0.210.0
|
||||||
|
|
||||||
|
This release includes the new `databricks labs` command to install, manage, and run Databricks Labs projects.
|
||||||
|
|
||||||
|
CLI:
|
||||||
|
* Add `--debug` as shortcut for `--log-level debug` ([#964](https://github.com/databricks/cli/pull/964)).
|
||||||
|
* Improved usability of `databricks auth login ... --configure-cluster` ([#956](https://github.com/databricks/cli/pull/956)).
|
||||||
|
* Make `databricks configure` save only explicit fields ([#973](https://github.com/databricks/cli/pull/973)).
|
||||||
|
* Add `databricks labs` command group ([#914](https://github.com/databricks/cli/pull/914)).
|
||||||
|
* Tolerate missing .databrickscfg file during `databricks auth login` ([#1003](https://github.com/databricks/cli/pull/1003)).
|
||||||
|
* Add `--configure-cluster` flag to configure command ([#1005](https://github.com/databricks/cli/pull/1005)).
|
||||||
|
* Fix bug where the account or workspace client could be `nil` ([#1020](https://github.com/databricks/cli/pull/1020)).
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
* Do not allow empty descriptions for bundle template inputs ([#967](https://github.com/databricks/cli/pull/967)).
|
||||||
|
* Added support for top-level permissions ([#928](https://github.com/databricks/cli/pull/928)).
|
||||||
|
* Allow jobs to be manually unpaused in development mode ([#885](https://github.com/databricks/cli/pull/885)).
|
||||||
|
* Fix template initialization from current working directory ([#976](https://github.com/databricks/cli/pull/976)).
|
||||||
|
* Add `--tag` and `--branch` options to bundle init command ([#975](https://github.com/databricks/cli/pull/975)).
|
||||||
|
* Work around DLT issue with `` not being set correctly ([#999](https://github.com/databricks/cli/pull/999)).
|
||||||
|
* Enable `spark_jar_task` with local JAR libraries ([#993](https://github.com/databricks/cli/pull/993)).
|
||||||
|
* Pass `USERPROFILE` environment variable to Terraform ([#1001](https://github.com/databricks/cli/pull/1001)).
|
||||||
|
* Improve error message when path is not a bundle template ([#985](https://github.com/databricks/cli/pull/985)).
|
||||||
|
* Correctly overwrite local state if remote state is newer ([#1008](https://github.com/databricks/cli/pull/1008)).
|
||||||
|
* Add mlops-stacks to the default `databricks bundle init` prompt ([#988](https://github.com/databricks/cli/pull/988)).
|
||||||
|
* Do not add wheel content hash in uploaded Python wheel path ([#1015](https://github.com/databricks/cli/pull/1015)).
|
||||||
|
* Do not replace pipeline libraries if there are no matches for pattern ([#1021](https://github.com/databricks/cli/pull/1021)).
|
||||||
|
|
||||||
|
Internal:
|
||||||
|
* Update CLI version in the VS Code extension during release ([#1014](https://github.com/databricks/cli/pull/1014)).
|
||||||
|
|
||||||
|
API Changes:
|
||||||
|
* Changed `databricks functions create` command.
|
||||||
|
* Changed `databricks metastores create` command with new required argument order.
|
||||||
|
* Removed `databricks metastores enable-optimization` command.
|
||||||
|
* Removed `databricks account o-auth-enrollment` command group.
|
||||||
|
* Removed `databricks apps delete` command.
|
||||||
|
* Removed `databricks apps get` command.
|
||||||
|
* Added `databricks apps delete-app` command.
|
||||||
|
* Added `databricks apps get-app` command.
|
||||||
|
* Added `databricks apps get-app-deployment-status` command.
|
||||||
|
* Added `databricks apps get-apps` command.
|
||||||
|
* Added `databricks apps get-events` command.
|
||||||
|
* Added `databricks account network-connectivity` command group.
|
||||||
|
|
||||||
|
OpenAPI commit 22f09783eb8a84d52026f856be3b2068f9498db3 (2023-11-23)
|
||||||
|
|
||||||
|
Dependency updates:
|
||||||
|
* Bump golang.org/x/term from 0.13.0 to 0.14.0 ([#981](https://github.com/databricks/cli/pull/981)).
|
||||||
|
* Bump github.com/hashicorp/terraform-json from 0.17.1 to 0.18.0 ([#979](https://github.com/databricks/cli/pull/979)).
|
||||||
|
* Bump golang.org/x/oauth2 from 0.13.0 to 0.14.0 ([#982](https://github.com/databricks/cli/pull/982)).
|
||||||
|
* Bump github.com/databricks/databricks-sdk-go from 0.24.0 to 0.25.0 ([#980](https://github.com/databricks/cli/pull/980)).
|
||||||
|
* Bump github.com/databricks/databricks-sdk-go from 0.25.0 to 0.26.0 ([#1019](https://github.com/databricks/cli/pull/1019)).
|
||||||
|
|
||||||
|
## 0.209.1
|
||||||
|
|
||||||
|
CLI:
|
||||||
|
* Hide `--progress-format` global flag ([#965](https://github.com/databricks/cli/pull/965)).
|
||||||
|
* Make configure command visible + fix bundle command description ([#961](https://github.com/databricks/cli/pull/961)).
|
||||||
|
* Log process ID in each log entry ([#949](https://github.com/databricks/cli/pull/949)).
|
||||||
|
* Improve error message when `--json` flag is specified ([#933](https://github.com/databricks/cli/pull/933)).
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
* Remove validation for default value against pattern ([#959](https://github.com/databricks/cli/pull/959)).
|
||||||
|
* Bundle path rewrites for dbt and SQL file tasks ([#962](https://github.com/databricks/cli/pull/962)).
|
||||||
|
* Initialize variable definitions that are defined without properties ([#966](https://github.com/databricks/cli/pull/966)).
|
||||||
|
|
||||||
|
Internal:
|
||||||
|
* Function to merge two instances of `config.Value` ([#938](https://github.com/databricks/cli/pull/938)).
|
||||||
|
* Make to/from string methods private to the jsonschema package ([#942](https://github.com/databricks/cli/pull/942)).
|
||||||
|
* Make Cobra runner compatible with testing interactive flows ([#957](https://github.com/databricks/cli/pull/957)).
|
||||||
|
* Added `env.UserHomeDir(ctx)` for parallel-friendly tests ([#955](https://github.com/databricks/cli/pull/955)).
|
||||||
|
|
||||||
|
|
||||||
|
Dependency updates:
|
||||||
|
* Bump golang.org/x/mod from 0.13.0 to 0.14.0 ([#954](https://github.com/databricks/cli/pull/954)).
|
||||||
|
* Bump golang.org/x/text from 0.13.0 to 0.14.0 ([#953](https://github.com/databricks/cli/pull/953)).
|
||||||
|
* Bump golang.org/x/sync from 0.4.0 to 0.5.0 ([#951](https://github.com/databricks/cli/pull/951)).
|
||||||
|
* Bump github.com/spf13/cobra from 1.7.0 to 1.8.0 ([#950](https://github.com/databricks/cli/pull/950)).
|
||||||
|
* Bump github.com/fatih/color from 1.15.0 to 1.16.0 ([#952](https://github.com/databricks/cli/pull/952)).
|
||||||
|
|
||||||
|
## 0.209.0
|
||||||
|
|
||||||
|
CLI:
|
||||||
|
* Added GitHub issue templates for CLI and DABs issues ([#925](https://github.com/databricks/cli/pull/925)).
|
||||||
|
* Simplified code generation logic for handling path and request body parameters and JSON input ([#905](https://github.com/databricks/cli/pull/905)).
|
||||||
|
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
* Fixed URL for bundle template documentation in init command help docs ([#903](https://github.com/databricks/cli/pull/903)).
|
||||||
|
* Fixed pattern validation for input parameters in a bundle template ([#912](https://github.com/databricks/cli/pull/912)).
|
||||||
|
* Fixed multiline description rendering for enum input parameters in bundle templates ([#916](https://github.com/databricks/cli/pull/916)).
|
||||||
|
* Changed production mode check for whether identity used is a service principal to use UserName ([#924](https://github.com/databricks/cli/pull/924)).
|
||||||
|
* Changed bundle deploy to upload partial terraform state even if deployment fails ([#923](https://github.com/databricks/cli/pull/923)).
|
||||||
|
* Added support for welcome messages to bundle templates ([#907](https://github.com/databricks/cli/pull/907)).
|
||||||
|
* Added support for uploading bundle deployment metadata to WSFS ([#845](https://github.com/databricks/cli/pull/845)).
|
||||||
|
|
||||||
|
|
||||||
|
Internal:
|
||||||
|
* Loading an empty yaml file yields a nil ([#906](https://github.com/databricks/cli/pull/906)).
|
||||||
|
* Library to convert config.Value to Go struct ([#904](https://github.com/databricks/cli/pull/904)).
|
||||||
|
* Remove default resolution of repo names against the Databricks Github account([#940](https://github.com/databricks/cli/pull/940)).
|
||||||
|
* Run make fmt from fmt job ([#929](https://github.com/databricks/cli/pull/929)).
|
||||||
|
* `make snapshot` to build file in `.databricks/databricks` ([#927](https://github.com/databricks/cli/pull/927)).
|
||||||
|
* Add configuration normalization code ([#915](https://github.com/databricks/cli/pull/915)).
|
||||||
|
|
||||||
|
API Changes:
|
||||||
|
* Added `databricks account network-policy` command group.
|
||||||
|
|
||||||
|
Dependency updates:
|
||||||
|
* Bump Terraform provider from v1.28.0 to v1.29.0 ([#926](https://github.com/databricks/cli/pull/926)).
|
||||||
|
* Bump the Go SDK in the CLI from v0.23 to v0.24 ([#919](https://github.com/databricks/cli/pull/919)).
|
||||||
|
* Bump google.golang.org/grpc from 1.58.2 to 1.58.3 ([#920](https://github.com/databricks/cli/pull/920)).
|
||||||
|
* Bump github.com/google/uuid from 1.3.1 to 1.4.0 ([#932](https://github.com/databricks/cli/pull/932)).
|
||||||
|
|
||||||
|
OpenAPI commit 5903bb39137fd76ac384b2044e425f9c56840e00 (2023-10-23)
|
||||||
|
|
||||||
|
## 0.208.2
|
||||||
|
|
||||||
|
CLI:
|
||||||
|
* Never load authentication configuration from bundle for sync command ([#889](https://github.com/databricks/cli/pull/889)).
|
||||||
|
* Fixed requiring positional arguments for API URL parameters ([#878](https://github.com/databricks/cli/pull/878)).
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
* Add support for validating CLI version when loading a jsonschema object ([#883](https://github.com/databricks/cli/pull/883)).
|
||||||
|
* Do not emit wheel wrapper error when python_wheel_wrapper setting is true ([#894](https://github.com/databricks/cli/pull/894)).
|
||||||
|
* Resolve configuration before performing verification ([#890](https://github.com/databricks/cli/pull/890)).
|
||||||
|
* Fix wheel task not working with with 13.x clusters ([#898](https://github.com/databricks/cli/pull/898)).
|
||||||
|
|
||||||
|
Internal:
|
||||||
|
* Skip prompt on completion hook ([#888](https://github.com/databricks/cli/pull/888)).
|
||||||
|
* New YAML loader to support configuration location ([#828](https://github.com/databricks/cli/pull/828)).
|
||||||
|
|
||||||
|
Dependency updates:
|
||||||
|
* Bump github.com/mattn/go-isatty from 0.0.19 to 0.0.20 ([#896](https://github.com/databricks/cli/pull/896)).
|
||||||
|
|
||||||
|
## 0.208.1
|
||||||
|
|
||||||
|
CLI:
|
||||||
|
* Fix rendering of streaming response ([#876](https://github.com/databricks/cli/pull/876)).
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
* Rename MLOps Stack to MLOps Stacks ([#881](https://github.com/databricks/cli/pull/881)).
|
||||||
|
* Support Python wheels larger than 10MB ([#879](https://github.com/databricks/cli/pull/879)).
|
||||||
|
* Improve the output of the `databricks bundle init` command ([#795](https://github.com/databricks/cli/pull/795)).
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## 0.208.0
|
||||||
|
|
||||||
|
Note: this release includes a fix for the issue where zero values (for example
|
||||||
|
`num_workers: 0`) were not included in the request body.
|
||||||
|
|
||||||
|
CLI:
|
||||||
|
* Use already instantiated WorkspaceClient in sync command ([#867](https://github.com/databricks/cli/pull/867)).
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
* Support Unity Catalog Registered Models in bundles ([#846](https://github.com/databricks/cli/pull/846)).
|
||||||
|
* Fixed merging task libraries from targets ([#868](https://github.com/databricks/cli/pull/868)).
|
||||||
|
* Add alias for mlops-stack template URL ([#869](https://github.com/databricks/cli/pull/869)).
|
||||||
|
|
||||||
|
API Changes:
|
||||||
|
* Changed `databricks account billable-usage download` command to start returning output.
|
||||||
|
* Changed `databricks account storage-credentials delete` command with new required argument order.
|
||||||
|
* Changed `databricks account storage-credentials get` command with new required argument order.
|
||||||
|
* Changed `databricks account storage-credentials update` command with new required argument order.
|
||||||
|
* Added `databricks workspace-bindings get-bindings` command.
|
||||||
|
* Added `databricks workspace-bindings update-bindings` command.
|
||||||
|
* Removed `databricks account network-policy` command group.
|
||||||
|
* Changed `databricks ip-access-lists list` command to return output.
|
||||||
|
|
||||||
|
OpenAPI commit 493a76554afd3afdd15dc858773d01643f80352a (2023-10-12)
|
||||||
|
|
||||||
|
Dependency updates:
|
||||||
|
* Update Go SDK to 0.23.0 and use custom marshaller ([#772](https://github.com/databricks/cli/pull/772)).
|
||||||
|
* Bump Terraform provider to v1.28.0 ([#871](https://github.com/databricks/cli/pull/871)).
|
||||||
|
* Bump golang.org/x/net from 0.16.0 to 0.17.0 ([#863](https://github.com/databricks/cli/pull/863)).
|
||||||
|
* Bump github.com/hashicorp/hc-install from 0.6.0 to 0.6.1 ([#870](https://github.com/databricks/cli/pull/870)).
|
||||||
|
|
||||||
|
## 0.207.1

CLI:
* Improve `workspace import` command by allowing references to local files for content ([#793](https://github.com/databricks/cli/pull/793)).
* Add `--file` flag to workspace export command ([#794](https://github.com/databricks/cli/pull/794)).
* Ensure profile flag is respected for sync command ([#837](https://github.com/databricks/cli/pull/837)).
* Add hint to delete sync snapshot if parsing fails ([#853](https://github.com/databricks/cli/pull/853)).
* Use profile information when getting a token using the CLI ([#855](https://github.com/databricks/cli/pull/855)).

Bundles:
* Minor template tweaks ([#832](https://github.com/databricks/cli/pull/832)).
* Fixed using repo files as pipeline libraries ([#847](https://github.com/databricks/cli/pull/847)).
* Support .gitignore syntax in sync section and make sure it works recursively ([#854](https://github.com/databricks/cli/pull/854)).
* Allow target overrides for sync section ([#856](https://github.com/databricks/cli/pull/856)).

Internal:
* Fix import export integration tests on Windows ([#842](https://github.com/databricks/cli/pull/842)).
* Fix workspace import test ([#844](https://github.com/databricks/cli/pull/844)).
* Automatically create a release PR in homebrew-tap repo ([#841](https://github.com/databricks/cli/pull/841)).

Dependency updates:
* Bump golang.org/x/term from 0.12.0 to 0.13.0 ([#852](https://github.com/databricks/cli/pull/852)).
* Bump golang.org/x/mod from 0.12.0 to 0.13.0 ([#851](https://github.com/databricks/cli/pull/851)).
* Bump golang.org/x/sync from 0.3.0 to 0.4.0 ([#849](https://github.com/databricks/cli/pull/849)).
* Bump golang.org/x/oauth2 from 0.12.0 to 0.13.0 ([#850](https://github.com/databricks/cli/pull/850)).
## 0.207.0

CLI:
* Refactor change computation for sync ([#785](https://github.com/databricks/cli/pull/785)).

Bundles:
* Allow digits in the generated short name ([#820](https://github.com/databricks/cli/pull/820)).
* Emit an error when an incompatible all-purpose cluster is used with Python wheel tasks ([#823](https://github.com/databricks/cli/pull/823)).
* Use normalized short name for tag value in development mode ([#821](https://github.com/databricks/cli/pull/821)).
* Added `python.DetectInterpreters` and other utils ([#805](https://github.com/databricks/cli/pull/805)).
* Mark artifacts properties as optional ([#834](https://github.com/databricks/cli/pull/834)).
* Added support for glob patterns in pipeline libraries section ([#833](https://github.com/databricks/cli/pull/833)).

Internal:
* Run tests to verify backend tag validation behavior ([#814](https://github.com/databricks/cli/pull/814)).
* Library to validate and normalize cloud-specific tags ([#819](https://github.com/databricks/cli/pull/819)).
* Added test to submit and run various Python tasks on multiple DBR versions ([#806](https://github.com/databricks/cli/pull/806)).
* Create a release PR in setup-cli repo on tag push ([#827](https://github.com/databricks/cli/pull/827)).

API Changes:
* Changed `databricks account metastore-assignments list` command to return .
* Changed `databricks jobs cancel-all-runs` command with new required argument order.
* Added `databricks account o-auth-published-apps` command group.
* Changed `databricks serving-endpoints query` command . New request type is .
* Added `databricks serving-endpoints patch` command.
* Added `databricks credentials-manager` command group.
* Added `databricks settings` command group.
* Changed `databricks clean-rooms list` command to require request of .
* Changed `databricks statement-execution execute-statement` command with new required argument order.

OpenAPI commit bcbf6e851e3d82fd910940910dd31c10c059746c (2023-10-02)

Dependency updates:
* Bump github.com/google/uuid from 1.3.0 to 1.3.1 ([#825](https://github.com/databricks/cli/pull/825)).
* Updated Go SDK to 0.22.0 ([#831](https://github.com/databricks/cli/pull/831)).
## 0.206.0

Bundles:
* Enable target overrides for pipeline clusters ([#792](https://github.com/databricks/cli/pull/792)).
* Add support for regex patterns in template schema ([#768](https://github.com/databricks/cli/pull/768)).
* Make the default `databricks bundle init` template more self-explanatory ([#796](https://github.com/databricks/cli/pull/796)).
* Make a notebook wrapper for Python wheel tasks optional ([#797](https://github.com/databricks/cli/pull/797)).
* Added a warning when Python wheel wrapper needs to be used ([#807](https://github.com/databricks/cli/pull/807)).

Internal:
* Added `process.Background()` and `process.Forwarded()` ([#804](https://github.com/databricks/cli/pull/804)).

Dependency updates:
* Bump golang.org/x/term from 0.11.0 to 0.12.0 ([#798](https://github.com/databricks/cli/pull/798)).
* Bump github.com/hashicorp/terraform-exec from 0.18.1 to 0.19.0 ([#801](https://github.com/databricks/cli/pull/801)).
* Bump golang.org/x/oauth2 from 0.11.0 to 0.12.0 ([#802](https://github.com/databricks/cli/pull/802)).
## 0.205.2

CLI:
* Prompt for profile only in interactive mode ([#788](https://github.com/databricks/cli/pull/788)).

Internal:
* Added setup Python action ([#789](https://github.com/databricks/cli/pull/789)).
## 0.205.1

Bundles:
* Use enums for default Python template ([#765](https://github.com/databricks/cli/pull/765)).
* Make bundle deploy work if no resources are defined ([#767](https://github.com/databricks/cli/pull/767)).
* Added support for experimental scripts section ([#632](https://github.com/databricks/cli/pull/632)).
* Error when unknown keys are encountered during template execution ([#766](https://github.com/databricks/cli/pull/766)).
* Fall back to full Git clone if shallow clone is not supported ([#775](https://github.com/databricks/cli/pull/775)).
* Enable environment overrides for job tasks ([#779](https://github.com/databricks/cli/pull/779)).
* Increase timeout waiting for job run to 1 day ([#786](https://github.com/databricks/cli/pull/786)).

Internal:
* Update Go SDK to v0.19.3 (unreleased) ([#778](https://github.com/databricks/cli/pull/778)).
## 0.205.0

This release marks the public preview phase of Databricks Asset Bundles.

For more information, please refer to our online documentation at
https://docs.databricks.com/en/dev-tools/bundles/.

CLI:
* Prompt once for a client profile ([#727](https://github.com/databricks/cli/pull/727)).

Bundles:
* Use clearer error message when no interpolation value is found ([#764](https://github.com/databricks/cli/pull/764)).
* Use interactive prompt to select resource to run if not specified ([#762](https://github.com/databricks/cli/pull/762)).
* Add documentation link to bundle command group description ([#770](https://github.com/databricks/cli/pull/770)).
## 0.204.1

Bundles:
* Fix conversion of job parameters ([#744](https://github.com/databricks/cli/pull/744)).
* Add schema and config validation to jsonschema package ([#740](https://github.com/databricks/cli/pull/740)).
* Support Model Serving Endpoints in bundles ([#682](https://github.com/databricks/cli/pull/682)).
* Do not include empty output in job run output ([#749](https://github.com/databricks/cli/pull/749)).
* Fixed marking libraries from DBFS as remote ([#750](https://github.com/databricks/cli/pull/750)).
* Process only Python wheel tasks which have local libraries used ([#751](https://github.com/databricks/cli/pull/751)).
* Add enum support for bundle templates ([#668](https://github.com/databricks/cli/pull/668)).
* Apply Python wheel trampoline if workspace library is used ([#755](https://github.com/databricks/cli/pull/755)).
* List available targets when incorrect target passed ([#756](https://github.com/databricks/cli/pull/756)).
* Make bundle and sync fields optional ([#757](https://github.com/databricks/cli/pull/757)).
* Consolidate environment variable interaction ([#747](https://github.com/databricks/cli/pull/747)).

Internal:
* Update Go SDK to v0.19.1 ([#759](https://github.com/databricks/cli/pull/759)).
## 0.204.0

This release includes permission-related commands for a subset of workspace
services where they apply. These complement the `permissions` command and
do not require specification of the object type to work with, as that is
implied by the command they are nested under.
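As a rough illustration only (not the CLI's implementation), here is a minimal Go sketch against the generic permissions API that these nested commands complement. The `iam.GetPermissionLevelsRequest` type, the `"jobs"` object type, and the job ID are assumptions for illustration:

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/iam"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// The generic `databricks permissions` command needs both an object type
	// and an object ID. The new nested commands (e.g. `databricks jobs
	// get-permission-levels <job-id>`) imply the object type instead.
	levels, err := w.Permissions.GetPermissionLevels(ctx, iam.GetPermissionLevelsRequest{
		RequestObjectType: "jobs", // assumed object type, for illustration
		RequestObjectId:   "123",  // hypothetical job ID
	})
	if err != nil {
		panic(err)
	}
	for _, l := range levels.PermissionLevels {
		fmt.Println(l.PermissionLevel, "-", l.Description)
	}
}
```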
CLI:
* Group permission related commands ([#730](https://github.com/databricks/cli/pull/730)).

Bundles:
* Fixed artifact file uploading on Windows and wheel execution on DBR 13.3 ([#722](https://github.com/databricks/cli/pull/722)).
* Make resource and artifact paths in bundle config relative to config folder ([#708](https://github.com/databricks/cli/pull/708)).
* Add support for ordering of input prompts ([#662](https://github.com/databricks/cli/pull/662)).
* Fix IsServicePrincipal() only working for workspace admins ([#732](https://github.com/databricks/cli/pull/732)).
* databricks bundle init template v1 ([#686](https://github.com/databricks/cli/pull/686)).
* databricks bundle init template v2: optional stubs, DLT support ([#700](https://github.com/databricks/cli/pull/700)).
* Show 'databricks bundle init' template in CLI prompt ([#725](https://github.com/databricks/cli/pull/725)).
* Include in set of environment variables to pass along. ([#736](https://github.com/databricks/cli/pull/736)).

Internal:
* Update Go SDK to v0.19.0 ([#729](https://github.com/databricks/cli/pull/729)).
* Replace API call to test configuration with dummy authenticate call ([#728](https://github.com/databricks/cli/pull/728)).
API Changes:
* Changed `databricks account storage-credentials create` command to return .
* Changed `databricks account storage-credentials get` command to return .
* Changed `databricks account storage-credentials list` command to return .
* Changed `databricks account storage-credentials update` command to return .
* Changed `databricks connections create` command with new required argument order.
* Changed `databricks connections update` command with new required argument order.
* Changed `databricks volumes create` command with new required argument order.
* Added `databricks artifact-allowlists` command group.
* Added `databricks model-versions` command group.
* Added `databricks registered-models` command group.
* Added `databricks cluster-policies get-permission-levels` command.
* Added `databricks cluster-policies get-permissions` command.
* Added `databricks cluster-policies set-permissions` command.
* Added `databricks cluster-policies update-permissions` command.
* Added `databricks clusters get-permission-levels` command.
* Added `databricks clusters get-permissions` command.
* Added `databricks clusters set-permissions` command.
* Added `databricks clusters update-permissions` command.
* Added `databricks instance-pools get-permission-levels` command.
* Added `databricks instance-pools get-permissions` command.
* Added `databricks instance-pools set-permissions` command.
* Added `databricks instance-pools update-permissions` command.
* Added `databricks files` command group.
* Changed `databricks permissions set` command to start returning .
* Changed `databricks permissions update` command to start returning .
* Added `databricks users get-permission-levels` command.
* Added `databricks users get-permissions` command.
* Added `databricks users set-permissions` command.
* Added `databricks users update-permissions` command.
* Added `databricks jobs get-permission-levels` command.
* Added `databricks jobs get-permissions` command.
* Added `databricks jobs set-permissions` command.
* Added `databricks jobs update-permissions` command.
* Changed `databricks experiments get-by-name` command to return .
* Changed `databricks experiments get-experiment` command to return .
* Added `databricks experiments delete-runs` command.
* Added `databricks experiments get-permission-levels` command.
* Added `databricks experiments get-permissions` command.
* Added `databricks experiments restore-runs` command.
* Added `databricks experiments set-permissions` command.
* Added `databricks experiments update-permissions` command.
* Added `databricks model-registry get-permission-levels` command.
* Added `databricks model-registry get-permissions` command.
* Added `databricks model-registry set-permissions` command.
* Added `databricks model-registry update-permissions` command.
* Added `databricks pipelines get-permission-levels` command.
* Added `databricks pipelines get-permissions` command.
* Added `databricks pipelines set-permissions` command.
* Added `databricks pipelines update-permissions` command.
* Added `databricks serving-endpoints get-permission-levels` command.
* Added `databricks serving-endpoints get-permissions` command.
* Added `databricks serving-endpoints set-permissions` command.
* Added `databricks serving-endpoints update-permissions` command.
* Added `databricks token-management get-permission-levels` command.
* Added `databricks token-management get-permissions` command.
* Added `databricks token-management set-permissions` command.
* Added `databricks token-management update-permissions` command.
* Changed `databricks dashboards create` command with new required argument order.
* Added `databricks warehouses get-permission-levels` command.
* Added `databricks warehouses get-permissions` command.
* Added `databricks warehouses set-permissions` command.
* Added `databricks warehouses update-permissions` command.
* Added `databricks dashboard-widgets` command group.
* Added `databricks query-visualizations` command group.
* Added `databricks repos get-permission-levels` command.
* Added `databricks repos get-permissions` command.
* Added `databricks repos set-permissions` command.
* Added `databricks repos update-permissions` command.
* Added `databricks secrets get-secret` command.
* Added `databricks workspace get-permission-levels` command.
* Added `databricks workspace get-permissions` command.
* Added `databricks workspace set-permissions` command.
* Added `databricks workspace update-permissions` command.

OpenAPI commit 09a7fa63d9ae243e5407941f200960ca14d48b07 (2023-09-04)
## 0.203.3

Bundles:
* Support cluster overrides with cluster_key and compute_key ([#696](https://github.com/databricks/cli/pull/696)).
* Allow referencing local Python wheels without artifacts section defined ([#703](https://github.com/databricks/cli/pull/703)).
* Fixed --environment flag ([#705](https://github.com/databricks/cli/pull/705)).
* Correctly identify local paths in libraries section ([#702](https://github.com/databricks/cli/pull/702)).
* Fixed path joining in FindFilesWithSuffixInPath ([#704](https://github.com/databricks/cli/pull/704)).
* Added transformation mutator for Python wheel task for them to work on DBR <13.1 ([#635](https://github.com/databricks/cli/pull/635)).

Internal:
* Add a foundation for built-in templates ([#685](https://github.com/databricks/cli/pull/685)).
* Test transform when no Python wheel tasks defined ([#714](https://github.com/databricks/cli/pull/714)).
* Pin Terraform binary version to 1.5.5 ([#715](https://github.com/databricks/cli/pull/715)).
* Cleanup after "Add a foundation for built-in templates" ([#707](https://github.com/databricks/cli/pull/707)).
* Filter down to Python wheel tasks only for trampoline ([#712](https://github.com/databricks/cli/pull/712)).
* Update Terraform provider schema structs from 1.23.0 ([#713](https://github.com/databricks/cli/pull/713)).
## 0.203.2

CLI:
* Added `databricks account o-auth-enrollment enable` command ([#687](https://github.com/databricks/cli/pull/687)).

Bundles:
* Do not try to auto-detect Python package if no Python wheel tasks are defined ([#674](https://github.com/databricks/cli/pull/674)).
* Renamed `environments` to `targets` in bundle configuration ([#670](https://github.com/databricks/cli/pull/670)).
* Rename init project-dir flag to output-dir ([#676](https://github.com/databricks/cli/pull/676)).
* Added support for sync.include and sync.exclude sections ([#671](https://github.com/databricks/cli/pull/671)).
* Add template directory flag for bundle templates ([#675](https://github.com/databricks/cli/pull/675)).
* Never ignore root directory when enumerating files in a repository ([#683](https://github.com/databricks/cli/pull/683)).
* Improve 'mode' error message ([#681](https://github.com/databricks/cli/pull/681)).
* Added run_as section for bundle configuration ([#692](https://github.com/databricks/cli/pull/692)).
## 0.203.1

CLI:
* Always resolve .databrickscfg file ([#659](https://github.com/databricks/cli/pull/659)).

Bundles:
* Add internal tag for bundle fields to be skipped from schema ([#636](https://github.com/databricks/cli/pull/636)).
* Log the bundle root configuration file if applicable ([#657](https://github.com/databricks/cli/pull/657)).
* Execute paths without the .tmpl extension as templates ([#654](https://github.com/databricks/cli/pull/654)).
* Enable environment overrides for job clusters ([#658](https://github.com/databricks/cli/pull/658)).
* Merge artifacts and resources block with overrides enabled ([#660](https://github.com/databricks/cli/pull/660)).
* Locked terraform binary version to <= 1.5.5 ([#666](https://github.com/databricks/cli/pull/666)).
* Return better error messages for invalid JSON schema types in templates ([#661](https://github.com/databricks/cli/pull/661)).
* Use custom prompter for bundle template inputs ([#663](https://github.com/databricks/cli/pull/663)).
* Add map and pair helper functions for bundle templates ([#665](https://github.com/databricks/cli/pull/665)).
* Correct name for force acquire deploy flag ([#656](https://github.com/databricks/cli/pull/656)).
* Confirm that override with a zero value doesn't work ([#669](https://github.com/databricks/cli/pull/669)).

Internal:
* Consolidate functions in libs/git ([#652](https://github.com/databricks/cli/pull/652)).
* Upgraded Go version to 1.21 ([#664](https://github.com/databricks/cli/pull/664)).
## 0.203.0

CLI:
* Infer host from profile during `auth login` ([#629](https://github.com/databricks/cli/pull/629)).

Bundles:
* Extend deployment mode support ([#577](https://github.com/databricks/cli/pull/577)).
* Add validation for Git settings in bundles ([#578](https://github.com/databricks/cli/pull/578)).
* Only treat files with .tmpl extension as templates ([#594](https://github.com/databricks/cli/pull/594)).
* Add JSON schema validation for input template parameters ([#598](https://github.com/databricks/cli/pull/598)).
* Add DATABRICKS_BUNDLE_INCLUDE_PATHS to specify include paths through env vars ([#591](https://github.com/databricks/cli/pull/591)).
* Initialise an empty default bundle if BUNDLE_ROOT and DATABRICKS_BUNDLE_INCLUDES env vars are present ([#604](https://github.com/databricks/cli/pull/604)).
* Regenerate bundle resource structs from latest Terraform provider ([#633](https://github.com/databricks/cli/pull/633)).
* Fixed processing jobs libraries with remote path ([#638](https://github.com/databricks/cli/pull/638)).
* Add unit test for file name execution during rendering ([#640](https://github.com/databricks/cli/pull/640)).
* Add bundle init command and support for prompting user for input values ([#631](https://github.com/databricks/cli/pull/631)).
* Fix bundle git branch validation ([#645](https://github.com/databricks/cli/pull/645)).

Internal:
* Fix mkdir integration test on GCP ([#620](https://github.com/databricks/cli/pull/620)).
* Fix git clone integration test for non-existing repo ([#610](https://github.com/databricks/cli/pull/610)).
* Remove push to main trigger for build workflow ([#621](https://github.com/databricks/cli/pull/621)).
* Remove workflow to publish binaries to S3 ([#622](https://github.com/databricks/cli/pull/622)).
* Fix failing fs mkdir test on Azure ([#627](https://github.com/databricks/cli/pull/627)).
* Print y/n options when displaying prompts using cmdio.Ask ([#650](https://github.com/databricks/cli/pull/650)).

API Changes:
* Changed `databricks account metastore-assignments create` command to not return anything.
* Added `databricks account network-policy` command group.

OpenAPI commit 7b57ba3a53f4de3d049b6a24391fe5474212daf8 (2023-07-28)

Dependency updates:
* Bump OpenAPI specification & Go SDK Version ([#624](https://github.com/databricks/cli/pull/624)).
* Bump golang.org/x/term from 0.10.0 to 0.11.0 ([#643](https://github.com/databricks/cli/pull/643)).
* Bump golang.org/x/text from 0.11.0 to 0.12.0 ([#642](https://github.com/databricks/cli/pull/642)).
* Bump golang.org/x/oauth2 from 0.10.0 to 0.11.0 ([#641](https://github.com/databricks/cli/pull/641)).
## 0.202.0

Breaking Change:
Makefile:
@@ -24,10 +24,11 @@ build: vendor
 
 snapshot:
 	@echo "✓ Building dev snapshot"
-	@goreleaser build --snapshot --clean --single-target
+	@go build -o .databricks/databricks
 
 vendor:
 	@echo "✓ Filling vendor folder with library code ..."
 	@go mod vendor
 
 .PHONY: build vendor coverage test lint fmt
NOTICE:
@@ -16,6 +16,10 @@ go-ini/ini - https://github.com/go-ini/ini
 Copyright ini authors
 License - https://github.com/go-ini/ini/blob/main/LICENSE
 
+uber-go/mock - https://go.uber.org/mock
+Copyright Google Inc.
+License - https://github.com/uber-go/mock/blob/main/LICENSE
+
 ---
 
 This software contains code from the following open source projects, licensed under the MPL 2.0 license:
@@ -4,9 +4,10 @@ import (
 	"context"
 	"fmt"
 
+	"slices"
+
 	"github.com/databricks/cli/bundle"
 	"golang.org/x/exp/maps"
-	"golang.org/x/exp/slices"
 )
 
 // all is an internal proxy for producing a list of mutators for all artifacts.
@@ -1,19 +1,20 @@
 package artifacts
 
 import (
+	"bytes"
 	"context"
-	"crypto/sha256"
-	"encoding/base64"
 	"errors"
 	"fmt"
 	"os"
 	"path"
+	"path/filepath"
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/artifacts/whl"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/libs/cmdio"
-	"github.com/databricks/databricks-sdk-go/service/workspace"
+	"github.com/databricks/cli/libs/filer"
+	"github.com/databricks/cli/libs/log"
 )
 
 type mutatorFactory = func(name string) bundle.Mutator
@@ -61,13 +62,13 @@ func (m *basicBuild) Apply(ctx context.Context, b *bundle.Bundle) error {
 		return fmt.Errorf("artifact doesn't exist: %s", m.name)
 	}
 
-	cmdio.LogString(ctx, fmt.Sprintf("artifacts.Build(%s): Building...", m.name))
+	cmdio.LogString(ctx, fmt.Sprintf("Building %s...", m.name))
 
 	out, err := artifact.Build(ctx)
 	if err != nil {
-		return fmt.Errorf("artifacts.Build(%s): %w, output: %s", m.name, err, out)
+		return fmt.Errorf("build for %s failed, error: %w, output: %s", m.name, err, out)
 	}
-	cmdio.LogString(ctx, fmt.Sprintf("artifacts.Build(%s): Build succeeded", m.name))
+	log.Infof(ctx, "Build succeeded")
 
 	return nil
 }
@@ -82,7 +83,7 @@ func BasicUpload(name string) bundle.Mutator {
 }
 
 func (m *basicUpload) Name() string {
-	return fmt.Sprintf("artifacts.Build(%s)", m.name)
+	return fmt.Sprintf("artifacts.Upload(%s)", m.name)
 }
 
 func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) error {
@@ -95,27 +96,37 @@ func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) error {
 		return fmt.Errorf("artifact source is not configured: %s", m.name)
 	}
 
-	err := uploadArtifact(ctx, artifact, b)
+	uploadPath, err := getUploadBasePath(b)
 	if err != nil {
-		return fmt.Errorf("artifacts.Upload(%s): %w", m.name, err)
+		return err
+	}
+
+	client, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), uploadPath)
+	if err != nil {
+		return err
+	}
+
+	err = uploadArtifact(ctx, artifact, uploadPath, client)
+	if err != nil {
+		return fmt.Errorf("upload for %s failed, error: %w", m.name, err)
 	}
 
 	return nil
 }
 
-func uploadArtifact(ctx context.Context, a *config.Artifact, b *bundle.Bundle) error {
+func uploadArtifact(ctx context.Context, a *config.Artifact, uploadPath string, client filer.Filer) error {
 	for i := range a.Files {
 		f := &a.Files[i]
 		if f.NeedsUpload() {
-			filename := path.Base(f.Source)
-			cmdio.LogString(ctx, fmt.Sprintf("artifacts.Upload(%s): Uploading...", filename))
-			remotePath, err := uploadArtifactFile(ctx, f.Source, b)
+			filename := filepath.Base(f.Source)
+			cmdio.LogString(ctx, fmt.Sprintf("Uploading %s...", filename))
+			err := uploadArtifactFile(ctx, f.Source, client)
 			if err != nil {
 				return err
 			}
-			cmdio.LogString(ctx, fmt.Sprintf("artifacts.Upload(%s): Upload succeeded", filename))
-			f.RemotePath = remotePath
+			log.Infof(ctx, "Upload succeeded")
+			f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source))
 		}
 	}
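To make the new upload path concrete, here is a minimal sketch of the filer pattern this hunk introduces. It assumes only the calls visible in the diff (`filer.NewWorkspaceFilesClient` and `Write` with the `OverwriteIfExists`/`CreateParentDirectories` flags); the function name and variable names are illustrative:

```go
package sketch

import (
	"bytes"
	"context"
	"os"
	"path/filepath"

	"github.com/databricks/cli/libs/filer"
	"github.com/databricks/databricks-sdk-go"
)

// sketchUpload uploads one local file into the workspace under basePath.
func sketchUpload(ctx context.Context, w *databricks.WorkspaceClient, basePath, localFile string) error {
	raw, err := os.ReadFile(localFile)
	if err != nil {
		return err
	}
	// A filer rooted at basePath; all writes are relative to it.
	f, err := filer.NewWorkspaceFilesClient(w, basePath)
	if err != nil {
		return err
	}
	// OverwriteIfExists plus CreateParentDirectories together replace the
	// old MkdirsByPath + Import(Overwrite: true) sequence removed below.
	return f.Write(ctx, filepath.Base(localFile), bytes.NewReader(raw),
		filer.OverwriteIfExists, filer.CreateParentDirectories)
}
```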
@@ -124,41 +135,23 @@ func uploadArtifact(ctx context.Context, a *config.Artifact, b *bundle.Bundle) e
 }
 
 // Function to upload artifact file to Workspace
-func uploadArtifactFile(ctx context.Context, file string, b *bundle.Bundle) (string, error) {
+func uploadArtifactFile(ctx context.Context, file string, client filer.Filer) error {
 	raw, err := os.ReadFile(file)
 	if err != nil {
-		return "", fmt.Errorf("unable to read %s: %w", file, errors.Unwrap(err))
+		return fmt.Errorf("unable to read %s: %w", file, errors.Unwrap(err))
 	}
 
-	uploadPath, err := getUploadBasePath(b)
+	filename := filepath.Base(file)
+	err = client.Write(ctx, filename, bytes.NewReader(raw), filer.OverwriteIfExists, filer.CreateParentDirectories)
 	if err != nil {
-		return "", err
+		return fmt.Errorf("unable to import %s: %w", filename, err)
 	}
 
-	fileHash := sha256.Sum256(raw)
-	remotePath := path.Join(uploadPath, fmt.Sprintf("%x", fileHash), path.Base(file))
-	// Make sure target directory exists.
-	err = b.WorkspaceClient().Workspace.MkdirsByPath(ctx, path.Dir(remotePath))
-	if err != nil {
-		return "", fmt.Errorf("unable to create directory for %s: %w", remotePath, err)
-	}
-
-	// Import to workspace.
-	err = b.WorkspaceClient().Workspace.Import(ctx, workspace.Import{
-		Path:      remotePath,
-		Overwrite: true,
-		Format:    workspace.ImportFormatAuto,
-		Content:   base64.StdEncoding.EncodeToString(raw),
-	})
-	if err != nil {
-		return "", fmt.Errorf("unable to import %s: %w", remotePath, err)
-	}
-
-	return remotePath, nil
+	return nil
 }
 
 func getUploadBasePath(b *bundle.Bundle) (string, error) {
-	artifactPath := b.Config.Workspace.ArtifactsPath
+	artifactPath := b.Config.Workspace.ArtifactPath
 	if artifactPath == "" {
 		return "", fmt.Errorf("remote artifact path not configured")
 	}
@@ -28,5 +28,6 @@ func (m *autodetect) Apply(ctx context.Context, b *bundle.Bundle) error {
 
 	return bundle.Apply(ctx, b, bundle.Seq(
 		whl.DetectPackage(),
+		whl.DefineArtifactsFromLibraries(),
 	))
 }
@@ -47,7 +47,11 @@ func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) error {
 		return fmt.Errorf("artifact doesn't exist: %s", m.name)
 	}
 
-	if artifact.BuildCommand != "" {
+	// Only try to infer the command if it's not already defined
+	// and there are no explicitly defined files, which means
+	// that the package is built outside of bundle cycles,
+	// manually by the customer.
+	if artifact.BuildCommand != "" || len(artifact.Files) > 0 {
 		return nil
 	}
 
@@ -10,7 +10,8 @@ import (
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/bundle/libraries"
+	"github.com/databricks/cli/libs/log"
 )
 
 type detectPkg struct {
@@ -25,17 +26,22 @@ func (m *detectPkg) Name() string {
 }
 
 func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) error {
-	cmdio.LogString(ctx, "artifacts.whl.AutoDetect: Detecting Python wheel project...")
+	wheelTasks := libraries.FindAllWheelTasksWithLocalLibraries(b)
+	if len(wheelTasks) == 0 {
+		log.Infof(ctx, "No local wheel tasks in databricks.yml config, skipping auto detect")
+		return nil
+	}
+	log.Infof(ctx, "Detecting Python wheel project...")
 
 	// checking if there is setup.py in the bundle root
 	setupPy := filepath.Join(b.Config.Path, "setup.py")
 	_, err := os.Stat(setupPy)
 	if err != nil {
-		cmdio.LogString(ctx, "artifacts.whl.AutoDetect: No Python wheel project found at bundle root folder")
+		log.Infof(ctx, "No Python wheel project found at bundle root folder")
 		return nil
 	}
 
-	cmdio.LogString(ctx, fmt.Sprintf("artifacts.whl.AutoDetect: Found Python wheel project at %s", b.Config.Path))
+	log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.Config.Path))
 	module := extractModuleName(setupPy)
 
 	if b.Config.Artifacts == nil {
@@ -9,7 +9,8 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/libs/cmdio"
-	"github.com/databricks/cli/python"
+	"github.com/databricks/cli/libs/log"
+	"github.com/databricks/cli/libs/python"
 )
 
 type build struct {
@@ -32,7 +33,7 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error {
 		return fmt.Errorf("artifact doesn't exist: %s", m.name)
 	}
 
-	cmdio.LogString(ctx, fmt.Sprintf("artifacts.whl.Build(%s): Building...", m.name))
+	cmdio.LogString(ctx, fmt.Sprintf("Building %s...", m.name))
 
 	dir := artifact.Path
 
@@ -42,13 +43,13 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error {
 
 	out, err := artifact.Build(ctx)
 	if err != nil {
-		return fmt.Errorf("artifacts.whl.Build(%s): Failed %w, output: %s", m.name, err, out)
+		return fmt.Errorf("build failed %s, error: %w, output: %s", m.name, err, out)
 	}
-	cmdio.LogString(ctx, fmt.Sprintf("artifacts.whl.Build(%s): Build succeeded", m.name))
+	log.Infof(ctx, "Build succeeded")
 
 	wheels := python.FindFilesWithSuffixInPath(distPath, ".whl")
 	if len(wheels) == 0 {
-		return fmt.Errorf("artifacts.whl.Build(%s): cannot find built wheel in %s", m.name, dir)
+		return fmt.Errorf("cannot find built wheel in %s for package %s", dir, m.name)
 	}
 	for _, wheel := range wheels {
 		artifact.Files = append(artifact.Files, config.ArtifactFile{
@@ -0,0 +1,56 @@
+package whl
+
+import (
+	"context"
+	"path/filepath"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/libraries"
+	"github.com/databricks/cli/libs/log"
+)
+
+type fromLibraries struct{}
+
+func DefineArtifactsFromLibraries() bundle.Mutator {
+	return &fromLibraries{}
+}
+
+func (m *fromLibraries) Name() string {
+	return "artifacts.whl.DefineArtifactsFromLibraries"
+}
+
+func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) error {
+	if len(b.Config.Artifacts) != 0 {
+		log.Debugf(ctx, "Skipping defining artifacts from libraries because artifacts section is explicitly defined")
+		return nil
+	}
+
+	tasks := libraries.FindAllWheelTasksWithLocalLibraries(b)
+	for _, task := range tasks {
+		for _, lib := range task.Libraries {
+			matches, err := filepath.Glob(filepath.Join(b.Config.Path, lib.Whl))
+			// File referenced from libraries section does not exist; skip it.
+			if err != nil {
+				continue
+			}
+
+			for _, match := range matches {
+				name := filepath.Base(match)
+				if b.Config.Artifacts == nil {
+					b.Config.Artifacts = make(map[string]*config.Artifact)
+				}
+
+				log.Debugf(ctx, "Adding an artifact block for %s", match)
+				b.Config.Artifacts[name] = &config.Artifact{
+					Files: []config.ArtifactFile{
+						{Source: match},
+					},
+					Type: config.ArtifactPythonWheel,
+				}
+			}
+		}
+	}
+
+	return nil
+}
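One detail worth flagging in the new mutator above, as a hedged aside: `filepath.Glob` only returns an error for malformed patterns, and a well-formed pattern that matches nothing yields an empty result with a nil error. So the `continue`-on-error path covers bad patterns, while a missing file simply produces zero matches and the inner loop never runs. A tiny standalone check of this standard-library behavior:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// A well-formed pattern with no matches: empty slice, nil error.
	matches, err := filepath.Glob("/does/not/exist/*.whl")
	fmt.Println(len(matches), err) // 0 <nil>

	// A malformed pattern is the only way to get an error back.
	_, err = filepath.Glob("[")
	fmt.Println(err) // syntax error in pattern
}
```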
@@ -5,7 +5,7 @@ import (
 	"fmt"
 
 	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/python"
+	"github.com/databricks/cli/libs/python"
 )
 
 type infer struct {
@@ -18,7 +18,22 @@ func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) error {
 	if err != nil {
 		return err
 	}
-	artifact.BuildCommand = fmt.Sprintf("%s setup.py bdist_wheel", py)
+
+	// Note: using the --build-number (build tag) flag does not help with re-installing
+	// libraries on all-purpose clusters. The reason is that `pip` ignores the build tag
+	// when upgrading a library and only looks at the wheel version.
+	// The build tag is only used for sorting versions; the one with the higher build tag takes priority when installed.
+	// It only works if no library is installed yet.
+	// See https://github.com/pypa/pip/blob/a15dd75d98884c94a77d349b800c7c755d8c34e4/src/pip/_internal/index/package_finder.py#L522-L556
+	// https://github.com/pypa/pip/issues/4781
+	//
+	// Thus, the only way to reinstall a library on an all-purpose cluster is to increase the wheel version manually or
+	// to use automatic version generation, e.g.:
+	// setup(
+	//   version=datetime.datetime.utcnow().strftime("%Y%m%d.%H%M%S"),
+	//   ...
+	// )
+	artifact.BuildCommand = fmt.Sprintf(`"%s" setup.py bdist_wheel`, py)
 
 	return nil
 }
bundle/bundle.go:
@@ -7,24 +7,39 @@
 package bundle
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"path/filepath"
 	"sync"
 
 	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/env"
+	"github.com/databricks/cli/bundle/metadata"
 	"github.com/databricks/cli/folders"
 	"github.com/databricks/cli/libs/git"
 	"github.com/databricks/cli/libs/locker"
+	"github.com/databricks/cli/libs/log"
+	"github.com/databricks/cli/libs/tags"
 	"github.com/databricks/cli/libs/terraform"
 	"github.com/databricks/databricks-sdk-go"
 	sdkconfig "github.com/databricks/databricks-sdk-go/config"
 	"github.com/hashicorp/terraform-exec/tfexec"
 )
 
+const internalFolder = ".internal"
+
 type Bundle struct {
 	Config config.Root
 
+	// Metadata about the bundle deployment. This is the interface Databricks services
+	// rely on to integrate with bundles when they need additional information about
+	// a bundle deployment.
+	//
+	// After deploy, a file containing the metadata (metadata.json) can be found
+	// in the WSFS location containing the bundle state.
+	Metadata metadata.Metadata
+
 	// Store a pointer to the workspace client.
 	// It can be initialized on demand after loading the configuration.
 	clientOnce sync.Once
@@ -41,54 +56,59 @@ type Bundle struct {
 	// if true, we skip approval checks for deploy, destroy resources and delete
 	// files
 	AutoApprove bool
+
+	// Tagging is used to normalize tag keys and values.
+	// The implementation depends on the cloud being targeted.
+	Tagging tags.Cloud
 }
 
-const ExtraIncludePathsKey string = "DATABRICKS_BUNDLE_INCLUDES"
-
-func Load(path string) (*Bundle, error) {
-	bundle := &Bundle{}
+func Load(ctx context.Context, path string) (*Bundle, error) {
+	b := &Bundle{}
 	stat, err := os.Stat(path)
 	if err != nil {
 		return nil, err
 	}
 	configFile, err := config.FileNames.FindInPath(path)
 	if err != nil {
-		_, hasIncludePathEnv := os.LookupEnv(ExtraIncludePathsKey)
-		_, hasBundleRootEnv := os.LookupEnv(envBundleRoot)
-		if hasIncludePathEnv && hasBundleRootEnv && stat.IsDir() {
-			bundle.Config = config.Root{
+		_, hasRootEnv := env.Root(ctx)
+		_, hasIncludesEnv := env.Includes(ctx)
+		if hasRootEnv && hasIncludesEnv && stat.IsDir() {
+			log.Debugf(ctx, "No bundle configuration; using bundle root: %s", path)
+			b.Config = config.Root{
 				Path: path,
 				Bundle: config.Bundle{
 					Name: filepath.Base(path),
 				},
 			}
-			return bundle, nil
+			return b, nil
 		}
 		return nil, err
 	}
-	err = bundle.Config.Load(configFile)
+	log.Debugf(ctx, "Loading bundle configuration from: %s", configFile)
+	root, err := config.Load(configFile)
 	if err != nil {
 		return nil, err
 	}
-	return bundle, nil
+	b.Config = *root
+	return b, nil
 }
 
 // MustLoad returns a bundle configuration.
 // It returns an error if a bundle was not found or could not be loaded.
-func MustLoad() (*Bundle, error) {
-	root, err := mustGetRoot()
+func MustLoad(ctx context.Context) (*Bundle, error) {
+	root, err := mustGetRoot(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	return Load(root)
+	return Load(ctx, root)
 }
 
 // TryLoad returns a bundle configuration if there is one, but doesn't fail if there isn't one.
 // It returns an error if a bundle was found but could not be loaded.
 // It returns a `nil` bundle if a bundle was not found.
-func TryLoad() (*Bundle, error) {
-	root, err := tryGetRoot()
+func TryLoad(ctx context.Context) (*Bundle, error) {
+	root, err := tryGetRoot(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -98,13 +118,21 @@ func TryLoad() (*Bundle, error) {
 		return nil, nil
 	}
 
-	return Load(root)
+	return Load(ctx, root)
+}
+
+func (b *Bundle) InitializeWorkspaceClient() (*databricks.WorkspaceClient, error) {
+	client, err := b.Config.Workspace.Client()
+	if err != nil {
+		return nil, fmt.Errorf("cannot resolve bundle auth configuration: %w", err)
+	}
+	return client, nil
 }
 
 func (b *Bundle) WorkspaceClient() *databricks.WorkspaceClient {
 	b.clientOnce.Do(func() {
 		var err error
-		b.client, err = b.Config.Workspace.Client()
+		b.client, err = b.InitializeWorkspaceClient()
 		if err != nil {
 			panic(err)
 		}
@@ -113,14 +141,13 @@ func (b *Bundle) WorkspaceClient() *databricks.WorkspaceClient {
 }
 
 // CacheDir returns directory to use for temporary files for this bundle.
-// Scoped to the bundle's environment.
-func (b *Bundle) CacheDir(paths ...string) (string, error) {
-	if b.Config.Bundle.Environment == "" {
-		panic("environment not set")
+// Scoped to the bundle's target.
+func (b *Bundle) CacheDir(ctx context.Context, paths ...string) (string, error) {
+	if b.Config.Bundle.Target == "" {
+		panic("target not set")
 	}
 
-	cacheDirName, exists := os.LookupEnv("DATABRICKS_BUNDLE_TMP")
-
+	cacheDirName, exists := env.TempDir(ctx)
 	if !exists || cacheDirName == "" {
 		cacheDirName = filepath.Join(
 			// Anchor at bundle root directory.
@@ -134,8 +161,8 @@ func (b *Bundle) CacheDir(paths ...string) (string, error) {
 	// Fixed components of the result path.
 	parts := []string{
 		cacheDirName,
-		// Scope with environment name.
-		b.Config.Bundle.Environment,
+		// Scope with target name.
+		b.Config.Bundle.Target,
 	}
 
 	// Append dynamic components of the result path.
@@ -151,6 +178,38 @@ func (b *Bundle) CacheDir(paths ...string) (string, error) {
 	return dir, nil
 }
 
+// This directory is used to store and automatically sync internal bundle files,
+// such as notebook trampoline files for Python wheels, etc.
+func (b *Bundle) InternalDir(ctx context.Context) (string, error) {
+	cacheDir, err := b.CacheDir(ctx)
+	if err != nil {
+		return "", err
+	}
+
+	dir := filepath.Join(cacheDir, internalFolder)
+	err = os.MkdirAll(dir, 0700)
+	if err != nil {
+		return dir, err
+	}
+
+	return dir, nil
+}
+
+// GetSyncIncludePatterns returns the list of user-defined includes
+// and also adds the InternalDir folder to the include list for the sync command
+// so this folder is always synced.
+func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) {
+	internalDir, err := b.InternalDir(ctx)
+	if err != nil {
+		return nil, err
+	}
+	internalDirRel, err := filepath.Rel(b.Config.Path, internalDir)
+	if err != nil {
+		return nil, err
+	}
+	return append(b.Config.Sync.Include, filepath.ToSlash(filepath.Join(internalDirRel, "*.*"))), nil
+}
+
 func (b *Bundle) GitRepository() (*git.Repository, error) {
 	rootPath, err := folders.FindDirWithLeaf(b.Config.Path, ".git")
 	if err != nil {
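As a worked example of how `GetSyncIncludePatterns` composes with `CacheDir` and `InternalDir` above, here is a hedged walkthrough. The bundle root `/repo` and target `default` are hypothetical, and it assumes no `DATABRICKS_BUNDLE_TMP` override:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Hypothetical values mirroring CacheDir/InternalDir from the hunk above.
	root := "/repo"
	cacheDir := filepath.Join(root, ".databricks", "bundle", "default")
	internalDir := filepath.Join(cacheDir, ".internal")

	rel, err := filepath.Rel(root, internalDir)
	if err != nil {
		panic(err)
	}
	// The pattern appended to sync.include for the internal folder.
	fmt.Println(filepath.ToSlash(filepath.Join(rel, "*.*")))
	// Output: .databricks/bundle/default/.internal/*.*
}
```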
@@ -1,108 +1,112 @@
 package bundle
 
 import (
+	"context"
 	"os"
 	"path/filepath"
 	"testing"
 
+	"github.com/databricks/cli/bundle/env"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
 func TestLoadNotExists(t *testing.T) {
-	b, err := Load("/doesntexist")
+	b, err := Load(context.Background(), "/doesntexist")
 	assert.True(t, os.IsNotExist(err))
 	assert.Nil(t, b)
 }
 
 func TestLoadExists(t *testing.T) {
-	b, err := Load("./tests/basic")
+	b, err := Load(context.Background(), "./tests/basic")
 	require.Nil(t, err)
 	assert.Equal(t, "basic", b.Config.Bundle.Name)
 }
 
 func TestBundleCacheDir(t *testing.T) {
+	ctx := context.Background()
 	projectDir := t.TempDir()
 	f1, err := os.Create(filepath.Join(projectDir, "databricks.yml"))
 	require.NoError(t, err)
 	f1.Close()
 
-	bundle, err := Load(projectDir)
+	bundle, err := Load(ctx, projectDir)
 	require.NoError(t, err)
 
-	// Artificially set environment.
-	// This is otherwise done by [mutators.SelectEnvironment].
-	bundle.Config.Bundle.Environment = "default"
+	// Artificially set target.
+	// This is otherwise done by [mutators.SelectTarget].
+	bundle.Config.Bundle.Target = "default"
 
 	// unset env variable in case it's set
	t.Setenv("DATABRICKS_BUNDLE_TMP", "")
 
-	cacheDir, err := bundle.CacheDir()
+	cacheDir, err := bundle.CacheDir(ctx)
 
-	// format is <CWD>/.databricks/bundle/<environment>
+	// format is <CWD>/.databricks/bundle/<target>
 	assert.NoError(t, err)
 	assert.Equal(t, filepath.Join(projectDir, ".databricks", "bundle", "default"), cacheDir)
 }
 
 func TestBundleCacheDirOverride(t *testing.T) {
+	ctx := context.Background()
 	projectDir := t.TempDir()
 	bundleTmpDir := t.TempDir()
 	f1, err := os.Create(filepath.Join(projectDir, "databricks.yml"))
 	require.NoError(t, err)
 	f1.Close()
 
-	bundle, err := Load(projectDir)
+	bundle, err := Load(ctx, projectDir)
 	require.NoError(t, err)
 
-	// Artificially set environment.
-	// This is otherwise done by [mutators.SelectEnvironment].
-	bundle.Config.Bundle.Environment = "default"
+	// Artificially set target.
+	// This is otherwise done by [mutators.SelectTarget].
+	bundle.Config.Bundle.Target = "default"
 
 	// now we expect to use 'bundleTmpDir' instead of CWD/.databricks/bundle
 	t.Setenv("DATABRICKS_BUNDLE_TMP", bundleTmpDir)
 
-	cacheDir, err := bundle.CacheDir()
+	cacheDir, err := bundle.CacheDir(ctx)
 
-	// format is <DATABRICKS_BUNDLE_TMP>/<environment>
+	// format is <DATABRICKS_BUNDLE_TMP>/<target>
 	assert.NoError(t, err)
 	assert.Equal(t, filepath.Join(bundleTmpDir, "default"), cacheDir)
 }
 
 func TestBundleMustLoadSuccess(t *testing.T) {
-	t.Setenv(envBundleRoot, "./tests/basic")
-	b, err := MustLoad()
+	t.Setenv(env.RootVariable, "./tests/basic")
+	b, err := MustLoad(context.Background())
 	require.NoError(t, err)
 	assert.Equal(t, "tests/basic", filepath.ToSlash(b.Config.Path))
 }
 
 func TestBundleMustLoadFailureWithEnv(t *testing.T) {
-	t.Setenv(envBundleRoot, "./tests/doesntexist")
-	_, err := MustLoad()
+	t.Setenv(env.RootVariable, "./tests/doesntexist")
+	_, err := MustLoad(context.Background())
 	require.Error(t, err, "not a directory")
 }
 
 func TestBundleMustLoadFailureIfNotFound(t *testing.T) {
 	chdir(t, t.TempDir())
-	_, err := MustLoad()
+	_, err := MustLoad(context.Background())
 	require.Error(t, err, "unable to find bundle root")
 }
 
 func TestBundleTryLoadSuccess(t *testing.T) {
-	t.Setenv(envBundleRoot, "./tests/basic")
-	b, err := TryLoad()
+	t.Setenv(env.RootVariable, "./tests/basic")
+	b, err := TryLoad(context.Background())
 	require.NoError(t, err)
 	assert.Equal(t, "tests/basic", filepath.ToSlash(b.Config.Path))
 }
 
 func TestBundleTryLoadFailureWithEnv(t *testing.T) {
-	t.Setenv(envBundleRoot, "./tests/doesntexist")
-	_, err := TryLoad()
+	t.Setenv(env.RootVariable, "./tests/doesntexist")
+	_, err := TryLoad(context.Background())
 	require.Error(t, err, "not a directory")
 }
 
 func TestBundleTryLoadOkIfNotFound(t *testing.T) {
 	chdir(t, t.TempDir())
-	b, err := TryLoad()
+	b, err := TryLoad(context.Background())
 	assert.NoError(t, err)
 	assert.Nil(t, b)
 }
@@ -1,16 +1,23 @@
 package config
 
 import (
-	"bytes"
 	"context"
 	"fmt"
-	"os/exec"
 	"path"
-	"strings"
 
+	"github.com/databricks/cli/bundle/config/paths"
+	"github.com/databricks/cli/libs/exec"
 	"github.com/databricks/databricks-sdk-go/service/compute"
 )
 
+type Artifacts map[string]*Artifact
+
+func (artifacts Artifacts) SetConfigFilePath(path string) {
+	for _, artifact := range artifacts {
+		artifact.ConfigFilePath = path
+	}
+}
+
 type ArtifactType string
 
 const ArtifactPythonWheel ArtifactType = `whl`
@@ -28,12 +35,14 @@ type Artifact struct {
 
 	// The local path to the directory with a root of artifact,
 	// for example, where setup.py is for Python projects
-	Path string `json:"path"`
+	Path string `json:"path,omitempty"`
 
 	// The relative or absolute path to the built artifact files
 	// (Python wheel, Java jar, etc.) itself
-	Files        []ArtifactFile `json:"files"`
-	BuildCommand string         `json:"build"`
+	Files        []ArtifactFile `json:"files,omitempty"`
+	BuildCommand string         `json:"build,omitempty"`
+
+	paths.Paths
 }
 
 func (a *Artifact) Build(ctx context.Context) ([]byte, error) {
@@ -41,19 +50,11 @@ func (a *Artifact) Build(ctx context.Context) ([]byte, error) {
 		return nil, fmt.Errorf("no build property defined")
 	}
 
-	out := make([][]byte, 0)
-	commands := strings.Split(a.BuildCommand, " && ")
-	for _, command := range commands {
-		buildParts := strings.Split(command, " ")
-		cmd := exec.CommandContext(ctx, buildParts[0], buildParts[1:]...)
-		cmd.Dir = a.Path
-		res, err := cmd.CombinedOutput()
-		if err != nil {
-			return res, err
-		}
-		out = append(out, res)
+	e, err := exec.NewCommandExecutor(a.Path)
+	if err != nil {
+		return nil, err
 	}
-	return bytes.Join(out, []byte{}), nil
+	return e.Exec(ctx, a.BuildCommand)
 }
 
 func (a *Artifact) NormalisePaths() {
@@ -67,9 +68,13 @@ func (a *Artifact) NormalisePaths() {
 		remotePath := path.Join(wsfsBase, f.RemotePath)
 		for i := range f.Libraries {
 			lib := f.Libraries[i]
-			switch a.Type {
-			case ArtifactPythonWheel:
+			if lib.Whl != "" {
 				lib.Whl = remotePath
+				continue
+			}
+			if lib.Jar != "" {
+				lib.Jar = remotePath
+				continue
 			}
 		}
 
@@ -0,0 +1,18 @@
+package config
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestArtifactBuild(t *testing.T) {
+	artifact := Artifact{
+		BuildCommand: "echo 'Hello from build command'",
+	}
+	res, err := artifact.Build(context.Background())
+	assert.NoError(t, err)
+	assert.NotNil(t, res)
+	assert.Equal(t, "Hello from build command\n", string(res))
+}
@@ -15,7 +15,10 @@ type Bundle struct {
 	// Default warehouse to run SQL on.
 	// DefaultWarehouse string `json:"default_warehouse,omitempty"`

-	// Environment is set by the mutator that selects the environment.
+	// Target is set by the mutator that selects the target.
+	Target string `json:"target,omitempty" bundle:"readonly"`
+
+	// DEPRECATED. Left for backward compatibility with Target
 	Environment string `json:"environment,omitempty" bundle:"readonly"`

 	// Terraform holds configuration related to Terraform.
@@ -26,16 +29,16 @@ type Bundle struct {
 	Lock Lock `json:"lock" bundle:"readonly"`

 	// Force-override Git branch validation.
-	Force bool `json:"force" bundle:"readonly"`
+	Force bool `json:"force,omitempty" bundle:"readonly"`

 	// Contains Git information like current commit, current branch and
 	// origin url. Automatically loaded by reading .git directory if not specified
 	Git Git `json:"git,omitempty"`

-	// Determines the mode of the environment.
+	// Determines the mode of the target.
 	// For example, 'mode: development' can be used for deployments for
 	// development purposes.
-	// Annotated readonly as this should be set at the environment level.
+	// Annotated readonly as this should be set at the target level.
 	Mode Mode `json:"mode,omitempty" bundle:"readonly"`

 	// Overrides the compute used for jobs and other supported assets.
@@ -0,0 +1,26 @@
+package config
+
+type Experimental struct {
+	Scripts map[ScriptHook]Command `json:"scripts,omitempty"`
+
+	// By default Python wheel tasks deployed as is to Databricks platform.
+	// If notebook wrapper required (for example, used in DBR < 13.1 or other configuration differences), users can provide a following experimental setting
+	// experimental:
+	//   python_wheel_wrapper: true
+	// In this case the configured wheel task will be deployed as a notebook task which install defined wheel in runtime and executes it.
+	// For more details see https://github.com/databricks/cli/pull/797 and https://github.com/databricks/cli/pull/635
+	PythonWheelWrapper bool `json:"python_wheel_wrapper,omitempty"`
+}
+
+type Command string
+type ScriptHook string
+
+// These hook names are subject to change and currently experimental
+const (
+	ScriptPreInit    ScriptHook = "preinit"
+	ScriptPostInit   ScriptHook = "postinit"
+	ScriptPreBuild   ScriptHook = "prebuild"
+	ScriptPostBuild  ScriptHook = "postbuild"
+	ScriptPreDeploy  ScriptHook = "predeploy"
+	ScriptPostDeploy ScriptHook = "postdeploy"
+)
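For orientation, a minimal sketch of how the Experimental block above could be populated in Go, as if decoded from an `experimental:` section of a bundle configuration. The hook command strings are made-up examples, not values from this commit:

    package main

    import (
    	"fmt"

    	"github.com/databricks/cli/bundle/config"
    )

    func main() {
    	// Equivalent of a hypothetical configuration:
    	//   experimental:
    	//     python_wheel_wrapper: true
    	//     scripts:
    	//       prebuild: "echo prebuild"
    	exp := config.Experimental{
    		PythonWheelWrapper: true, // wrap wheel tasks in a notebook (e.g. DBR < 13.1)
    		Scripts: map[config.ScriptHook]config.Command{
    			config.ScriptPreBuild: "echo prebuild",
    		},
    	}
    	fmt.Println(exp.PythonWheelWrapper, exp.Scripts[config.ScriptPreBuild])
    }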
@@ -0,0 +1,43 @@
+package config
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+type ConfigFileNames []string
+
+// FileNames contains allowed names of root bundle configuration files.
+var FileNames = ConfigFileNames{
+	"databricks.yml",
+	"databricks.yaml",
+	"bundle.yml",
+	"bundle.yaml",
+}
+
+func (c ConfigFileNames) FindInPath(path string) (string, error) {
+	result := ""
+	var firstErr error
+
+	for _, file := range c {
+		filePath := filepath.Join(path, file)
+		_, err := os.Stat(filePath)
+		if err == nil {
+			if result != "" {
+				return "", fmt.Errorf("multiple bundle root configuration files found in %s", path)
+			}
+			result = filePath
+		} else {
+			if firstErr == nil {
+				firstErr = err
+			}
+		}
+	}
+
+	if result == "" {
+		return "", firstErr
+	}
+
+	return result, nil
+}
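A usage sketch for the root-file resolution above, assuming the exported FileNames variable from this diff; the working-directory argument is an illustrative choice:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/databricks/cli/bundle/config"
    )

    func main() {
    	// Returns the single root configuration file in ".", or an error if
    	// none of the allowed names exists or more than one does.
    	path, err := config.FileNames.FindInPath(".")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("bundle root config:", path)
    }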
@@ -0,0 +1,70 @@
+package config
+
+import (
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestConfigFileNames_FindInPath(t *testing.T) {
+	testCases := []struct {
+		name     string
+		files    []string
+		expected string
+		err      string
+	}{
+		{
+			name:     "file found",
+			files:    []string{"databricks.yml"},
+			expected: "BASE/databricks.yml",
+			err:      "",
+		},
+		{
+			name:     "file found",
+			files:    []string{"bundle.yml"},
+			expected: "BASE/bundle.yml",
+			err:      "",
+		},
+		{
+			name:     "multiple files found",
+			files:    []string{"databricks.yaml", "bundle.yml"},
+			expected: "",
+			err:      "multiple bundle root configuration files found",
+		},
+		{
+			name:     "file not found",
+			files:    []string{},
+			expected: "",
+			err:      "no such file or directory",
+		},
+	}
+
+	if runtime.GOOS == "windows" {
+		testCases[3].err = "The system cannot find the file specified."
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			projectDir := t.TempDir()
+			for _, file := range tc.files {
+				f1, _ := os.Create(filepath.Join(projectDir, file))
+				f1.Close()
+			}
+
+			result, err := FileNames.FindInPath(projectDir)
+
+			expected := strings.Replace(tc.expected, "BASE/", projectDir+string(os.PathSeparator), 1)
+			assert.Equal(t, expected, result)
+
+			if tc.err != "" {
+				assert.ErrorContains(t, err, tc.err)
+			} else {
+				assert.NoError(t, err)
+			}
+		})
+	}
+}
@@ -5,6 +5,9 @@ type Git struct {
 	OriginURL string `json:"origin_url,omitempty"`
 	Commit    string `json:"commit,omitempty" bundle:"readonly"`

+	// Path to bundle root relative to the git repository root.
+	BundleRootPath string `json:"bundle_root_path,omitempty" bundle:"readonly"`
+
 	// Inferred is set to true if the Git details were inferred and weren't set explicitly
 	Inferred bool `json:"-" bundle:"readonly"`

@@ -9,10 +9,11 @@ import (
 	"sort"
 	"strings"

+	"slices"
+
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config/variable"
 	"golang.org/x/exp/maps"
-	"golang.org/x/exp/slices"
 )

 const Delimiter = "."
@@ -183,7 +184,7 @@ func (a *accumulator) Resolve(path string, seenPaths []string, fns ...LookupFunc
 	// fetch the string node to resolve
 	field, ok := a.strings[path]
 	if !ok {
-		return fmt.Errorf("could not resolve reference %s", path)
+		return fmt.Errorf("no value found for interpolation reference: ${%s}", path)
 	}

 	// return early if the string field has no variables to interpolate
@@ -247,5 +247,5 @@ func TestInterpolationInvalidVariableReference(t *testing.T) {
 	}

 	err := expand(&config)
-	assert.ErrorContains(t, err, "could not resolve reference vars.foo")
+	assert.ErrorContains(t, err, "no value found for interpolation reference: ${vars.foo}")
 }
@@ -3,9 +3,8 @@ package interpolation
 import (
 	"errors"
 	"fmt"
+	"slices"
 	"strings"

-	"golang.org/x/exp/slices"
 )

 // LookupFunction returns the value to rewrite a path expression to.
@@ -4,11 +4,11 @@ type Lock struct {
 	// Enabled toggles deployment lock. True by default.
 	// Use a pointer value so that only explicitly configured values are set
 	// and we don't merge configuration with zero-initialized values.
-	Enabled *bool `json:"enabled"`
+	Enabled *bool `json:"enabled,omitempty"`

 	// Force acquisition of deployment lock even if it is currently held.
 	// This may be necessary if a prior deployment failed to release the lock.
-	Force bool `json:"force"`
+	Force bool `json:"force,omitempty"`
 }

 func (lock Lock) IsEnabled() bool {
@@ -1,37 +0,0 @@
-package mutator
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/config"
-)
-
-type defineDefaultEnvironment struct {
-	name string
-}
-
-// DefineDefaultEnvironment adds an environment named "default"
-// to the configuration if none have been defined.
-func DefineDefaultEnvironment() bundle.Mutator {
-	return &defineDefaultEnvironment{
-		name: "default",
-	}
-}
-
-func (m *defineDefaultEnvironment) Name() string {
-	return fmt.Sprintf("DefineDefaultEnvironment(%s)", m.name)
-}
-
-func (m *defineDefaultEnvironment) Apply(_ context.Context, b *bundle.Bundle) error {
-	// Nothing to do if the configuration has at least 1 environment.
-	if len(b.Config.Environments) > 0 {
-		return nil
-	}
-
-	// Define default environment.
-	b.Config.Environments = make(map[string]*config.Environment)
-	b.Config.Environments[m.name] = &config.Environment{}
-	return nil
-}
@@ -1,35 +0,0 @@
-package mutator_test
-
-import (
-	"context"
-	"testing"
-
-	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/bundle/config/mutator"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-func TestDefaultEnvironment(t *testing.T) {
-	bundle := &bundle.Bundle{}
-	err := mutator.DefineDefaultEnvironment().Apply(context.Background(), bundle)
-	require.NoError(t, err)
-	env, ok := bundle.Config.Environments["default"]
-	assert.True(t, ok)
-	assert.Equal(t, &config.Environment{}, env)
-}
-
-func TestDefaultEnvironmentAlreadySpecified(t *testing.T) {
-	bundle := &bundle.Bundle{
-		Config: config.Root{
-			Environments: map[string]*config.Environment{
-				"development": {},
-			},
-		},
-	}
-	err := mutator.DefineDefaultEnvironment().Apply(context.Background(), bundle)
-	require.NoError(t, err)
-	_, ok := bundle.Config.Environments["default"]
-	assert.False(t, ok)
-}
@@ -0,0 +1,37 @@
+package mutator
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+)
+
+type defineDefaultTarget struct {
+	name string
+}
+
+// DefineDefaultTarget adds a target named "default"
+// to the configuration if none have been defined.
+func DefineDefaultTarget() bundle.Mutator {
+	return &defineDefaultTarget{
+		name: "default",
+	}
+}
+
+func (m *defineDefaultTarget) Name() string {
+	return fmt.Sprintf("DefineDefaultTarget(%s)", m.name)
+}
+
+func (m *defineDefaultTarget) Apply(_ context.Context, b *bundle.Bundle) error {
+	// Nothing to do if the configuration has at least 1 target.
+	if len(b.Config.Targets) > 0 {
+		return nil
+	}
+
+	// Define default target.
+	b.Config.Targets = make(map[string]*config.Target)
+	b.Config.Targets[m.name] = &config.Target{}
+	return nil
+}
@@ -0,0 +1,35 @@
+package mutator_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestDefaultTarget(t *testing.T) {
+	b := &bundle.Bundle{}
+	err := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget())
+	require.NoError(t, err)
+	env, ok := b.Config.Targets["default"]
+	assert.True(t, ok)
+	assert.Equal(t, &config.Target{}, env)
+}
+
+func TestDefaultTargetAlreadySpecified(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Targets: map[string]*config.Target{
+				"development": {},
+			},
+		},
+	}
+	err := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget())
+	require.NoError(t, err)
+	_, ok := b.Config.Targets["default"]
+	assert.False(t, ok)
+}
@@ -25,12 +25,12 @@ func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundl
 		return fmt.Errorf("unable to define default workspace paths: workspace root not defined")
 	}

-	if b.Config.Workspace.FilesPath == "" {
+	if b.Config.Workspace.FilePath == "" {
-		b.Config.Workspace.FilesPath = path.Join(root, "files")
+		b.Config.Workspace.FilePath = path.Join(root, "files")
 	}

-	if b.Config.Workspace.ArtifactsPath == "" {
+	if b.Config.Workspace.ArtifactPath == "" {
-		b.Config.Workspace.ArtifactsPath = path.Join(root, "artifacts")
+		b.Config.Workspace.ArtifactPath = path.Join(root, "artifacts")
 	}

 	if b.Config.Workspace.StatePath == "" {
@@ -12,34 +12,34 @@ import (
 )

 func TestDefineDefaultWorkspacePaths(t *testing.T) {
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Workspace: config.Workspace{
 				RootPath: "/",
 			},
 		},
 	}
-	err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths())
 	require.NoError(t, err)
-	assert.Equal(t, "/files", bundle.Config.Workspace.FilesPath)
+	assert.Equal(t, "/files", b.Config.Workspace.FilePath)
-	assert.Equal(t, "/artifacts", bundle.Config.Workspace.ArtifactsPath)
+	assert.Equal(t, "/artifacts", b.Config.Workspace.ArtifactPath)
-	assert.Equal(t, "/state", bundle.Config.Workspace.StatePath)
+	assert.Equal(t, "/state", b.Config.Workspace.StatePath)
 }

 func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) {
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Workspace: config.Workspace{
 				RootPath: "/",
-				FilesPath: "/foo/bar",
+				FilePath: "/foo/bar",
-				ArtifactsPath: "/foo/bar",
+				ArtifactPath: "/foo/bar",
 				StatePath: "/foo/bar",
 			},
 		},
 	}
-	err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths())
 	require.NoError(t, err)
-	assert.Equal(t, "/foo/bar", bundle.Config.Workspace.FilesPath)
+	assert.Equal(t, "/foo/bar", b.Config.Workspace.FilePath)
-	assert.Equal(t, "/foo/bar", bundle.Config.Workspace.ArtifactsPath)
+	assert.Equal(t, "/foo/bar", b.Config.Workspace.ArtifactPath)
-	assert.Equal(t, "/foo/bar", bundle.Config.Workspace.StatePath)
+	assert.Equal(t, "/foo/bar", b.Config.Workspace.StatePath)
 }
@@ -27,14 +27,14 @@ func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle
 		return fmt.Errorf("unable to define default workspace root: bundle name not defined")
 	}

-	if b.Config.Bundle.Environment == "" {
+	if b.Config.Bundle.Target == "" {
-		return fmt.Errorf("unable to define default workspace root: bundle environment not selected")
+		return fmt.Errorf("unable to define default workspace root: bundle target not selected")
 	}

 	b.Config.Workspace.RootPath = fmt.Sprintf(
 		"~/.bundle/%s/%s",
 		b.Config.Bundle.Name,
-		b.Config.Bundle.Environment,
+		b.Config.Bundle.Target,
 	)
 	return nil
 }
@@ -12,15 +12,15 @@ import (
 )

 func TestDefaultWorkspaceRoot(t *testing.T) {
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Name: "name",
-				Environment: "environment",
+				Target: "environment",
 			},
 		},
 	}
-	err := mutator.DefineDefaultWorkspaceRoot().Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspaceRoot())
 	require.NoError(t, err)
-	assert.Equal(t, "~/.bundle/name/environment", bundle.Config.Workspace.RootPath)
+	assert.Equal(t, "~/.bundle/name/environment", b.Config.Workspace.RootPath)
 }
@@ -0,0 +1,94 @@
+package mutator
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/libraries"
+	"github.com/databricks/databricks-sdk-go/service/pipelines"
+)
+
+type expandPipelineGlobPaths struct{}
+
+func ExpandPipelineGlobPaths() bundle.Mutator {
+	return &expandPipelineGlobPaths{}
+}
+
+func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) error {
+	for key, pipeline := range b.Config.Resources.Pipelines {
+		dir, err := pipeline.ConfigFileDirectory()
+		if err != nil {
+			return fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err)
+		}
+
+		expandedLibraries := make([]pipelines.PipelineLibrary, 0)
+		for i := 0; i < len(pipeline.Libraries); i++ {
+
+			library := &pipeline.Libraries[i]
+			path := getGlobPatternToExpand(library)
+			if path == "" || !libraries.IsLocalPath(path) {
+				expandedLibraries = append(expandedLibraries, *library)
+				continue
+			}
+
+			matches, err := filepath.Glob(filepath.Join(dir, path))
+			if err != nil {
+				return err
+			}
+
+			if len(matches) == 0 {
+				expandedLibraries = append(expandedLibraries, *library)
+				continue
+			}
+
+			for _, match := range matches {
+				m, err := filepath.Rel(dir, match)
+				if err != nil {
+					return err
+				}
+				expandedLibraries = append(expandedLibraries, cloneWithPath(library, m))
+			}
+		}
+		pipeline.Libraries = expandedLibraries
+	}
+
+	return nil
+}
+
+func getGlobPatternToExpand(library *pipelines.PipelineLibrary) string {
+	if library.File != nil {
+		return library.File.Path
+	}
+
+	if library.Notebook != nil {
+		return library.Notebook.Path
+	}
+
+	return ""
+}
+
+func cloneWithPath(library *pipelines.PipelineLibrary, path string) pipelines.PipelineLibrary {
+	if library.File != nil {
+		return pipelines.PipelineLibrary{
+			File: &pipelines.FileLibrary{
+				Path: path,
+			},
+		}
+	}
+
+	if library.Notebook != nil {
+		return pipelines.PipelineLibrary{
+			Notebook: &pipelines.NotebookLibrary{
+				Path: path,
+			},
+		}
+	}
+
+	return pipelines.PipelineLibrary{}
+}
+
+func (*expandPipelineGlobPaths) Name() string {
+	return "ExpandPipelineGlobPaths"
+}
@@ -0,0 +1,166 @@
+package mutator
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/paths"
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/databricks/databricks-sdk-go/service/pipelines"
+	"github.com/stretchr/testify/require"
+)
+
+func touchEmptyFile(t *testing.T, path string) {
+	err := os.MkdirAll(filepath.Dir(path), 0700)
+	require.NoError(t, err)
+	f, err := os.Create(path)
+	require.NoError(t, err)
+	f.Close()
+}
+
+func TestExpandGlobPathsInPipelines(t *testing.T) {
+	dir := t.TempDir()
+
+	touchEmptyFile(t, filepath.Join(dir, "test1.ipynb"))
+	touchEmptyFile(t, filepath.Join(dir, "test/test2.ipynb"))
+	touchEmptyFile(t, filepath.Join(dir, "test/test3.ipynb"))
+	touchEmptyFile(t, filepath.Join(dir, "test1.jar"))
+	touchEmptyFile(t, filepath.Join(dir, "test/test2.jar"))
+	touchEmptyFile(t, filepath.Join(dir, "test/test3.jar"))
+	touchEmptyFile(t, filepath.Join(dir, "test1.py"))
+	touchEmptyFile(t, filepath.Join(dir, "test/test2.py"))
+	touchEmptyFile(t, filepath.Join(dir, "test/test3.py"))
+
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Path: dir,
+			Resources: config.Resources{
+				Pipelines: map[string]*resources.Pipeline{
+					"pipeline": {
+						Paths: paths.Paths{
+							ConfigFilePath: filepath.Join(dir, "resource.yml"),
+						},
+						PipelineSpec: &pipelines.PipelineSpec{
+							Libraries: []pipelines.PipelineLibrary{
+								{
+									Notebook: &pipelines.NotebookLibrary{
+										Path: "./**/*.ipynb",
+									},
+								},
+								{
+									Jar: "./*.jar",
+								},
+								{
+									File: &pipelines.FileLibrary{
+										Path: "./**/*.py",
+									},
+								},
+								{
+									Maven: &compute.MavenLibrary{
+										Coordinates: "org.jsoup:jsoup:1.7.2",
+									},
+								},
+								{
+									Notebook: &pipelines.NotebookLibrary{
+										Path: "./test1.ipynb",
+									},
+								},
+								{
+									Notebook: &pipelines.NotebookLibrary{
+										Path: "/Workspace/Users/me@company.com/test.ipynb",
+									},
+								},
+								{
+									Notebook: &pipelines.NotebookLibrary{
+										Path: "dbfs:/me@company.com/test.ipynb",
+									},
+								},
+								{
+									Notebook: &pipelines.NotebookLibrary{
+										Path: "/Repos/somerepo/test.ipynb",
+									},
+								},
+								{
+									Notebook: &pipelines.NotebookLibrary{
+										Path: "./non-existent.ipynb",
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	m := ExpandPipelineGlobPaths()
+	err := bundle.Apply(context.Background(), b, m)
+	require.NoError(t, err)
+
+	libraries := b.Config.Resources.Pipelines["pipeline"].Libraries
+	require.Len(t, libraries, 11)
+
+	// Making sure glob patterns are expanded correctly
+	require.True(t, containsNotebook(libraries, filepath.Join("test", "test2.ipynb")))
+	require.True(t, containsNotebook(libraries, filepath.Join("test", "test3.ipynb")))
+	require.True(t, containsFile(libraries, filepath.Join("test", "test2.py")))
+	require.True(t, containsFile(libraries, filepath.Join("test", "test3.py")))
+
+	// Making sure exact file references work as well
+	require.True(t, containsNotebook(libraries, "test1.ipynb"))
+
+	// Making sure absolute pass to remote FS file references work as well
+	require.True(t, containsNotebook(libraries, "/Workspace/Users/me@company.com/test.ipynb"))
+	require.True(t, containsNotebook(libraries, "dbfs:/me@company.com/test.ipynb"))
+	require.True(t, containsNotebook(libraries, "/Repos/somerepo/test.ipynb"))
+
+	// Making sure other libraries are not replaced
+	require.True(t, containsJar(libraries, "./*.jar"))
+	require.True(t, containsMaven(libraries, "org.jsoup:jsoup:1.7.2"))
+	require.True(t, containsNotebook(libraries, "./non-existent.ipynb"))
+}
+
+func containsNotebook(libraries []pipelines.PipelineLibrary, path string) bool {
+	for _, l := range libraries {
+		if l.Notebook != nil && l.Notebook.Path == path {
+			return true
+		}
+	}
+
+	return false
+}
+
+func containsJar(libraries []pipelines.PipelineLibrary, path string) bool {
+	for _, l := range libraries {
+		if l.Jar == path {
+			return true
+		}
+	}
+
+	return false
+}
+
+func containsMaven(libraries []pipelines.PipelineLibrary, coordinates string) bool {
+	for _, l := range libraries {
+		if l.Maven != nil && l.Maven.Coordinates == coordinates {
+			return true
+		}
+	}
+
+	return false
+}
+
+func containsFile(libraries []pipelines.PipelineLibrary, path string) bool {
+	for _, l := range libraries {
+		if l.File != nil && l.File.Path == path {
+			return true
+		}
+	}
+
+	return false
+}
@@ -13,7 +13,7 @@ import (
 )

 func TestExpandWorkspaceRoot(t *testing.T) {
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Workspace: config.Workspace{
 				CurrentUser: &config.User{
@@ -25,13 +25,13 @@ func TestExpandWorkspaceRoot(t *testing.T) {
 			},
 		},
 	}
-	err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
 	require.NoError(t, err)
-	assert.Equal(t, "/Users/jane@doe.com/foo", bundle.Config.Workspace.RootPath)
+	assert.Equal(t, "/Users/jane@doe.com/foo", b.Config.Workspace.RootPath)
 }

 func TestExpandWorkspaceRootDoesNothing(t *testing.T) {
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Workspace: config.Workspace{
 				CurrentUser: &config.User{
@@ -43,13 +43,13 @@ func TestExpandWorkspaceRootDoesNothing(t *testing.T) {
 			},
 		},
 	}
-	err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
 	require.NoError(t, err)
-	assert.Equal(t, "/Users/charly@doe.com/foo", bundle.Config.Workspace.RootPath)
+	assert.Equal(t, "/Users/charly@doe.com/foo", b.Config.Workspace.RootPath)
 }

 func TestExpandWorkspaceRootWithoutRoot(t *testing.T) {
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Workspace: config.Workspace{
 				CurrentUser: &config.User{
@@ -60,18 +60,18 @@ func TestExpandWorkspaceRootWithoutRoot(t *testing.T) {
 			},
 		},
 	}
-	err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
 	require.Error(t, err)
 }

 func TestExpandWorkspaceRootWithoutCurrentUser(t *testing.T) {
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Workspace: config.Workspace{
 				RootPath: "~/foo",
 			},
 		},
 	}
-	err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
 	require.Error(t, err)
 }
@@ -0,0 +1,35 @@
+package mutator
+
+import (
+	"context"
+
+	"github.com/databricks/cli/bundle"
+)
+
+type ifMutator struct {
+	condition      func(*bundle.Bundle) bool
+	onTrueMutator  bundle.Mutator
+	onFalseMutator bundle.Mutator
+}
+
+func If(
+	condition func(*bundle.Bundle) bool,
+	onTrueMutator bundle.Mutator,
+	onFalseMutator bundle.Mutator,
+) bundle.Mutator {
+	return &ifMutator{
+		condition, onTrueMutator, onFalseMutator,
+	}
+}
+
+func (m *ifMutator) Apply(ctx context.Context, b *bundle.Bundle) error {
+	if m.condition(b) {
+		return bundle.Apply(ctx, b, m.onTrueMutator)
+	} else {
+		return bundle.Apply(ctx, b, m.onFalseMutator)
+	}
+}
+
+func (m *ifMutator) Name() string {
+	return "If"
+}
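An illustrative composition of the If mutator above with other mutators from this commit; the development-mode condition is a made-up example, not taken from the source:

    // Run OverrideCompute only for development targets; otherwise do nothing.
    m := mutator.If(
    	func(b *bundle.Bundle) bool { return b.Config.Bundle.Mode == config.Development },
    	mutator.OverrideCompute(),
    	mutator.NoOp(),
    )
    err := bundle.Apply(ctx, b, m)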
@@ -0,0 +1,30 @@
+package mutator
+
+import (
+	"context"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config/variable"
+)
+
+type initializeVariables struct{}
+
+// InitializeVariables initializes nil variables to their corresponding zero values.
+func InitializeVariables() bundle.Mutator {
+	return &initializeVariables{}
+}
+
+func (m *initializeVariables) Name() string {
+	return "InitializeVariables"
+}
+
+func (m *initializeVariables) Apply(ctx context.Context, b *bundle.Bundle) error {
+	vars := b.Config.Variables
+	for k, v := range vars {
+		if v == nil {
+			vars[k] = &variable.Variable{}
+		}
+	}
+
+	return nil
+}
@@ -0,0 +1,42 @@
+package mutator_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/databricks/cli/bundle/config/variable"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestInitializeVariables(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Variables: map[string]*variable.Variable{
+				"foo": nil,
+				"bar": {
+					Description: "This is a description",
+				},
+			},
+		},
+	}
+	err := bundle.Apply(context.Background(), b, mutator.InitializeVariables())
+	require.NoError(t, err)
+	assert.NotNil(t, b.Config.Variables["foo"])
+	assert.NotNil(t, b.Config.Variables["bar"])
+	assert.Equal(t, "This is a description", b.Config.Variables["bar"].Description)
+}
+
+func TestInitializeVariablesWithoutVariables(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Variables: nil,
+		},
+	}
+	err := bundle.Apply(context.Background(), b, mutator.InitializeVariables())
+	require.NoError(t, err)
+	assert.Nil(t, b.Config.Variables)
+}
@@ -0,0 +1,25 @@
+package mutator
+
+import (
+	"context"
+
+	"github.com/databricks/cli/bundle"
+)
+
+type initializeWorkspaceClient struct{}
+
+func InitializeWorkspaceClient() bundle.Mutator {
+	return &initializeWorkspaceClient{}
+}
+
+func (m *initializeWorkspaceClient) Name() string {
+	return "InitializeWorkspaceClient"
+}
+
+// Apply initializes the workspace client for the bundle. We do this here so
+// downstream calls to b.WorkspaceClient() do not panic if there's an error in the
+// auth configuration.
+func (m *initializeWorkspaceClient) Apply(_ context.Context, b *bundle.Bundle) error {
+	_, err := b.InitializeWorkspaceClient()
+	return err
+}
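A sketch of the ordering this mutator enables: validating the client up front turns a bad auth configuration into a returned error rather than a panic in a later mutator. It assumes the bundle.Apply helper used throughout this diff and that PopulateCurrentUser has a constructor following the same pattern as the other mutators here:

    ms := []bundle.Mutator{
    	mutator.InitializeWorkspaceClient(), // fail fast on invalid auth configuration
    	mutator.PopulateCurrentUser(),       // may now call b.WorkspaceClient() safely
    }
    for _, m := range ms {
    	if err := bundle.Apply(ctx, b, m); err != nil {
    		return err
    	}
    }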
@@ -2,6 +2,7 @@ package mutator

 import (
 	"context"
+	"path/filepath"

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/libs/git"
@@ -24,17 +25,20 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error {
 	if err != nil {
 		return err
 	}
-	// load branch name if undefined
-	if b.Config.Bundle.Git.Branch == "" {
+	// Read branch name of current checkout
 	branch, err := repo.CurrentBranch()
-	if err != nil {
+	if err == nil {
-		log.Warnf(ctx, "failed to load current branch: %s", err)
-	} else {
-		b.Config.Bundle.Git.Branch = branch
 		b.Config.Bundle.Git.ActualBranch = branch
+		if b.Config.Bundle.Git.Branch == "" {
+			// Only load branch if there's no user defined value
 			b.Config.Bundle.Git.Inferred = true
+			b.Config.Bundle.Git.Branch = branch
 		}
+	} else {
+		log.Warnf(ctx, "failed to load current branch: %s", err)
 	}

 	// load commit hash if undefined
 	if b.Config.Bundle.Git.Commit == "" {
 		commit, err := repo.LatestCommit()
@@ -49,5 +53,17 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error {
 		remoteUrl := repo.OriginUrl()
 		b.Config.Bundle.Git.OriginURL = remoteUrl
 	}

+	// Compute relative path of the bundle root from the Git repo root.
+	absBundlePath, err := filepath.Abs(b.Config.Path)
+	if err != nil {
+		return err
+	}
+	// repo.Root() returns the absolute path of the repo
+	relBundlePath, err := filepath.Rel(repo.Root(), absBundlePath)
+	if err != nil {
+		return err
+	}
+	b.Config.Bundle.Git.BundleRootPath = filepath.ToSlash(relBundlePath)
 	return nil
 }
@@ -2,16 +2,20 @@ package mutator

 import (
 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/scripts"
 )

 func DefaultMutators() []bundle.Mutator {
 	return []bundle.Mutator{
+		scripts.Execute(config.ScriptPreInit),
 		ProcessRootIncludes(),
-		DefineDefaultEnvironment(),
+		InitializeVariables(),
+		DefineDefaultTarget(),
 		LoadGitDetails(),
 	}
 }

-func DefaultMutatorsForEnvironment(env string) []bundle.Mutator {
+func DefaultMutatorsForTarget(env string) []bundle.Mutator {
-	return append(DefaultMutators(), SelectEnvironment(env))
+	return append(DefaultMutators(), SelectTarget(env))
 }
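A usage sketch for the renamed entry point, applying each default mutator in order with the bundle.Apply signature seen throughout this diff; the "dev" target name is hypothetical:

    // Run the default mutator chain, then select the "dev" target.
    for _, m := range mutator.DefaultMutatorsForTarget("dev") {
    	if err := bundle.Apply(ctx, b, m); err != nil {
    		return err
    	}
    }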
@@ -0,0 +1,21 @@
+package mutator
+
+import (
+	"context"
+
+	"github.com/databricks/cli/bundle"
+)
+
+type noop struct{}
+
+func (*noop) Apply(context.Context, *bundle.Bundle) error {
+	return nil
+}
+
+func (*noop) Name() string {
+	return "NoOp"
+}
+
+func NoOp() bundle.Mutator {
+	return &noop{}
+}
@@ -3,11 +3,11 @@ package mutator
 import (
 	"context"
 	"fmt"
-	"os"

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/libs/env"
 )

 type overrideCompute struct{}
@@ -23,10 +23,10 @@ func (m *overrideCompute) Name() string {
 func overrideJobCompute(j *resources.Job, compute string) {
 	for i := range j.Tasks {
 		task := &j.Tasks[i]
-		if task.NewCluster != nil {
+		if task.NewCluster != nil || task.ExistingClusterId != "" || task.ComputeKey != "" || task.JobClusterKey != "" {
 			task.NewCluster = nil
-			task.ExistingClusterId = compute
+			task.JobClusterKey = ""
-		} else if task.ExistingClusterId != "" {
+			task.ComputeKey = ""
 			task.ExistingClusterId = compute
 		}
 	}
@@ -35,12 +35,12 @@ func overrideJobCompute(j *resources.Job, compute string) {
 func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) error {
 	if b.Config.Bundle.Mode != config.Development {
 		if b.Config.Bundle.ComputeID != "" {
-			return fmt.Errorf("cannot override compute for an environment that does not use 'mode: development'")
+			return fmt.Errorf("cannot override compute for an target that does not use 'mode: development'")
 		}
 		return nil
 	}
-	if os.Getenv("DATABRICKS_CLUSTER_ID") != "" {
+	if v := env.Get(ctx, "DATABRICKS_CLUSTER_ID"); v != "" {
-		b.Config.Bundle.ComputeID = os.Getenv("DATABRICKS_CLUSTER_ID")
+		b.Config.Bundle.ComputeID = v
 	}

 	if b.Config.Bundle.ComputeID == "" {
|
@ -2,7 +2,6 @@ package mutator_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"os"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
|
@ -16,8 +15,8 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestOverrideDevelopment(t *testing.T) {
|
func TestOverrideDevelopment(t *testing.T) {
|
||||||
os.Setenv("DATABRICKS_CLUSTER_ID", "")
|
t.Setenv("DATABRICKS_CLUSTER_ID", "")
|
||||||
bundle := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Bundle: config.Bundle{
|
Bundle: config.Bundle{
|
||||||
Mode: config.Development,
|
Mode: config.Development,
|
||||||
|
@ -34,6 +33,12 @@ func TestOverrideDevelopment(t *testing.T) {
|
||||||
{
|
{
|
||||||
ExistingClusterId: "cluster2",
|
ExistingClusterId: "cluster2",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
ComputeKey: "compute_key",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
JobClusterKey: "cluster_key",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}},
|
}},
|
||||||
},
|
},
|
||||||
|
@ -42,16 +47,22 @@ func TestOverrideDevelopment(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
m := mutator.OverrideCompute()
|
m := mutator.OverrideCompute()
|
||||||
err := m.Apply(context.Background(), bundle)
|
err := bundle.Apply(context.Background(), b, m)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Nil(t, bundle.Config.Resources.Jobs["job1"].Tasks[0].NewCluster)
|
assert.Nil(t, b.Config.Resources.Jobs["job1"].Tasks[0].NewCluster)
|
||||||
assert.Equal(t, "newClusterID", bundle.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
|
assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
|
||||||
assert.Equal(t, "newClusterID", bundle.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
|
assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
|
||||||
|
assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[2].ExistingClusterId)
|
||||||
|
assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[3].ExistingClusterId)
|
||||||
|
|
||||||
|
assert.Nil(t, b.Config.Resources.Jobs["job1"].Tasks[0].NewCluster)
|
||||||
|
assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[2].ComputeKey)
|
||||||
|
assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[3].JobClusterKey)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestOverrideDevelopmentEnv(t *testing.T) {
|
func TestOverrideDevelopmentEnv(t *testing.T) {
|
||||||
os.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
|
t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
|
||||||
bundle := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Resources: config.Resources{
|
Resources: config.Resources{
|
||||||
Jobs: map[string]*resources.Job{
|
Jobs: map[string]*resources.Job{
|
||||||
|
@ -72,13 +83,38 @@ func TestOverrideDevelopmentEnv(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
m := mutator.OverrideCompute()
|
m := mutator.OverrideCompute()
|
||||||
err := m.Apply(context.Background(), bundle)
|
err := bundle.Apply(context.Background(), b, m)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, "cluster2", bundle.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
|
assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestOverridePipelineTask(t *testing.T) {
|
||||||
|
t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
|
||||||
|
b := &bundle.Bundle{
|
||||||
|
Config: config.Root{
|
||||||
|
Resources: config.Resources{
|
||||||
|
Jobs: map[string]*resources.Job{
|
||||||
|
"job1": {JobSettings: &jobs.JobSettings{
|
||||||
|
Name: "job1",
|
||||||
|
Tasks: []jobs.Task{
|
||||||
|
{
|
||||||
|
PipelineTask: &jobs.PipelineTask{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
m := mutator.OverrideCompute()
|
||||||
|
err := bundle.Apply(context.Background(), b, m)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestOverrideProduction(t *testing.T) {
|
func TestOverrideProduction(t *testing.T) {
|
||||||
bundle := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Bundle: config.Bundle{
|
Bundle: config.Bundle{
|
||||||
ComputeID: "newClusterID",
|
ComputeID: "newClusterID",
|
||||||
|
@ -102,13 +138,13 @@ func TestOverrideProduction(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
m := mutator.OverrideCompute()
|
m := mutator.OverrideCompute()
|
||||||
err := m.Apply(context.Background(), bundle)
|
err := bundle.Apply(context.Background(), b, m)
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestOverrideProductionEnv(t *testing.T) {
|
func TestOverrideProductionEnv(t *testing.T) {
|
||||||
os.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
|
t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
|
||||||
bundle := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Resources: config.Resources{
|
Resources: config.Resources{
|
||||||
Jobs: map[string]*resources.Job{
|
Jobs: map[string]*resources.Job{
|
||||||
|
@ -129,6 +165,6 @@ func TestOverrideProductionEnv(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
m := mutator.OverrideCompute()
|
m := mutator.OverrideCompute()
|
||||||
err := m.Apply(context.Background(), bundle)
|
err := bundle.Apply(context.Background(), b, m)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
|
@@ -7,6 +7,7 @@ import (

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/libs/tags"
 )

 type populateCurrentUser struct{}
@@ -21,6 +22,10 @@ func (m *populateCurrentUser) Name() string {
 }

 func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error {
+	if b.Config.Workspace.CurrentUser != nil {
+		return nil
+	}
+
 	w := b.WorkspaceClient()
 	me, err := w.CurrentUser.Me(ctx)
 	if err != nil {
@@ -31,18 +36,24 @@ func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error
 		ShortName: getShortUserName(me.UserName),
 		User: me,
 	}

+	// Configure tagging object now that we know we have a valid client.
+	b.Tagging = tags.ForCloud(w.Config)
+
 	return nil
 }

+func replaceNonAlphanumeric(r rune) rune {
+	if unicode.IsLetter(r) || unicode.IsDigit(r) {
+		return r
+	}
+	return '_'
+}
+
 // Get a short-form username, based on the user's primary email address.
 // We leave the full range of unicode letters in tact, but remove all "special" characters,
 // including dots, which are not supported in e.g. experiment names.
 func getShortUserName(emailAddress string) string {
-	r := []rune(strings.Split(emailAddress, "@")[0])
+	local, _, _ := strings.Cut(emailAddress, "@")
-	for i := 0; i < len(r); i++ {
+	return strings.Map(replaceNonAlphanumeric, local)
-		if !unicode.IsLetter(r[i]) {
-			r[i] = '_'
-		}
-	}
-	return string(r)
 }
@ -1,6 +1,10 @@
package mutator

import "testing"
import (
"testing"

"github.com/stretchr/testify/assert"
)

func TestPopulateCurrentUser(t *testing.T) {
// We need to implement workspace client mocking to implement this test.

@ -13,28 +17,60 @@ func TestGetShortUserName(t *testing.T) {
expected string
}{
{
name: "test alphanumeric characters",
email: "test.user.1234@example.com",
email: "test.user@example.com",
expected: "test_user_1234",
expected: "test_user",
},
{
name: "test unicode characters",
email: "tést.üser@example.com",
expected: "tést_üser",
},
{
name: "test special characters",
email: "test$.user@example.com",
expected: "test__user",
},
{
email: `jöhn.dœ@domain.com`, // Using non-ASCII characters.
expected: "jöhn_dœ",
},
{
email: `first+tag@email.com`, // The plus (+) sign is used for "sub-addressing" in some email services.
expected: "first_tag",
},
{
email: `email@sub.domain.com`, // Using a sub-domain.
expected: "email",
},
{
email: `"_quoted"@domain.com`, // Quoted strings can be part of the local-part.
expected: "__quoted_",
},
{
email: `name-o'mally@website.org`, // Single quote in the local-part.
expected: "name_o_mally",
},
{
email: `user%domain@external.com`, // Percent sign can be used for email routing in legacy systems.
expected: "user_domain",
},
{
email: `long.name.with.dots@domain.net`, // Multiple dots in the local-part.
expected: "long_name_with_dots",
},
{
email: `me&you@together.com`, // Using an ampersand (&) in the local-part.
expected: "me_you",
},
{
email: `user!def!xyz@domain.org`, // The exclamation mark can be valid in some legacy systems.
expected: "user_def_xyz",
},
{
email: `admin@ιντερνετ.com`, // Domain in non-ASCII characters (IDN or Internationalized Domain Name).
expected: "admin",
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.expected, getShortUserName(tt.email))
result := getShortUserName(tt.email)
if result != tt.expected {
t.Errorf("getShortUserName(%q) = %q; expected %q", tt.email, result, tt.expected)
}
})
}
}

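The refactor drops the name field and the subtests in favor of a single assert per case, so a failing case is identified by the assert message rather than a subtest name. If per-case filtering with go test -run were wanted again, the email itself could double as the subtest name; a sketch under that assumption:

package mutator

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Sketch: the same table shape, with each email serving as the subtest name.
func TestGetShortUserNameSubtests(t *testing.T) {
	tests := []struct {
		email    string
		expected string
	}{
		{email: "test.user.1234@example.com", expected: "test_user_1234"},
		{email: "first+tag@email.com", expected: "first_tag"},
	}
	for _, tt := range tests {
		tt := tt // capture range variable for the closure
		t.Run(tt.email, func(t *testing.T) {
			assert.Equal(t, tt.expected, getShortUserName(tt.email))
		})
	}
}
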
@ -1,190 +0,0 @@
package mutator

import (
"context"
"reflect"
"strings"
"testing"

"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go/service/iam"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/ml"
"github.com/databricks/databricks-sdk-go/service/pipelines"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func mockBundle(mode config.Mode) *bundle.Bundle {
return &bundle.Bundle{
Config: config.Root{
Bundle: config.Bundle{
Mode: mode,
Git: config.Git{
OriginURL: "http://origin",
Branch: "main",
},
},
Workspace: config.Workspace{
CurrentUser: &config.User{
ShortName: "lennart",
User: &iam.User{
UserName: "lennart@company.com",
Id: "1",
},
},
StatePath: "/Users/lennart@company.com/.bundle/x/y/state",
ArtifactsPath: "/Users/lennart@company.com/.bundle/x/y/artifacts",
FilesPath: "/Users/lennart@company.com/.bundle/x/y/files",
},
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job1": {JobSettings: &jobs.JobSettings{Name: "job1"}},
},
Pipelines: map[string]*resources.Pipeline{
"pipeline1": {PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1"}},
},
Experiments: map[string]*resources.MlflowExperiment{
"experiment1": {Experiment: &ml.Experiment{Name: "/Users/lennart.kats@databricks.com/experiment1"}},
"experiment2": {Experiment: &ml.Experiment{Name: "experiment2"}},
},
Models: map[string]*resources.MlflowModel{
"model1": {Model: &ml.Model{Name: "model1"}},
},
},
},
}
}

func TestProcessEnvironmentModeDevelopment(t *testing.T) {
bundle := mockBundle(config.Development)

m := ProcessEnvironmentMode()
err := m.Apply(context.Background(), bundle)
require.NoError(t, err)
assert.Equal(t, "[dev lennart] job1", bundle.Config.Resources.Jobs["job1"].Name)
assert.Equal(t, "[dev lennart] pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name)
assert.Equal(t, "/Users/lennart.kats@databricks.com/[dev lennart] experiment1", bundle.Config.Resources.Experiments["experiment1"].Name)
assert.Equal(t, "[dev lennart] experiment2", bundle.Config.Resources.Experiments["experiment2"].Name)
assert.Equal(t, "[dev lennart] model1", bundle.Config.Resources.Models["model1"].Name)
assert.Equal(t, "dev", bundle.Config.Resources.Experiments["experiment1"].Experiment.Tags[0].Key)
assert.True(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
}

func TestProcessEnvironmentModeDefault(t *testing.T) {
bundle := mockBundle("")

m := ProcessEnvironmentMode()
err := m.Apply(context.Background(), bundle)
require.NoError(t, err)
assert.Equal(t, "job1", bundle.Config.Resources.Jobs["job1"].Name)
assert.Equal(t, "pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name)
assert.False(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
}

func TestProcessEnvironmentModeProduction(t *testing.T) {
bundle := mockBundle(config.Production)

err := validateProductionMode(context.Background(), bundle, false)
require.ErrorContains(t, err, "state_path")

bundle.Config.Workspace.StatePath = "/Shared/.bundle/x/y/state"
bundle.Config.Workspace.ArtifactsPath = "/Shared/.bundle/x/y/artifacts"
bundle.Config.Workspace.FilesPath = "/Shared/.bundle/x/y/files"

err = validateProductionMode(context.Background(), bundle, false)
require.ErrorContains(t, err, "production")

permissions := []resources.Permission{
{
Level: "CAN_MANAGE",
UserName: "user@company.com",
},
}
bundle.Config.Resources.Jobs["job1"].Permissions = permissions
bundle.Config.Resources.Jobs["job1"].RunAs = &jobs.JobRunAs{UserName: "user@company.com"}
bundle.Config.Resources.Pipelines["pipeline1"].Permissions = permissions
bundle.Config.Resources.Experiments["experiment1"].Permissions = permissions
bundle.Config.Resources.Experiments["experiment2"].Permissions = permissions
bundle.Config.Resources.Models["model1"].Permissions = permissions

err = validateProductionMode(context.Background(), bundle, false)
require.NoError(t, err)

assert.Equal(t, "job1", bundle.Config.Resources.Jobs["job1"].Name)
assert.Equal(t, "pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name)
assert.False(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
}

func TestProcessEnvironmentModeProductionGit(t *testing.T) {
bundle := mockBundle(config.Production)

// Pretend the user didn't set Git configuration explicitly
bundle.Config.Bundle.Git.Inferred = true

err := validateProductionMode(context.Background(), bundle, false)
require.ErrorContains(t, err, "git")
bundle.Config.Bundle.Git.Inferred = false
}

func TestProcessEnvironmentModeProductionOkForPrincipal(t *testing.T) {
bundle := mockBundle(config.Production)

// Our environment has all kinds of problems when not using service principals ...
err := validateProductionMode(context.Background(), bundle, false)
require.Error(t, err)

// ... but we're much less strict when a principal is used
err = validateProductionMode(context.Background(), bundle, true)
require.NoError(t, err)
}

// Make sure that we have test coverage for all resource types
func TestAllResourcesMocked(t *testing.T) {
bundle := mockBundle(config.Development)
resources := reflect.ValueOf(bundle.Config.Resources)

for i := 0; i < resources.NumField(); i++ {
field := resources.Field(i)
if field.Kind() == reflect.Map {
assert.True(
t,
!field.IsNil() && field.Len() > 0,
"process_environment_mode should support '%s' (please add it to process_environment_mode.go and extend the test suite)",
resources.Type().Field(i).Name,
)
}
}
}

// Make sure that we at least rename all resources
func TestAllResourcesRenamed(t *testing.T) {
bundle := mockBundle(config.Development)
resources := reflect.ValueOf(bundle.Config.Resources)

m := ProcessEnvironmentMode()
err := m.Apply(context.Background(), bundle)
require.NoError(t, err)

for i := 0; i < resources.NumField(); i++ {
field := resources.Field(i)

if field.Kind() == reflect.Map {
for _, key := range field.MapKeys() {
resource := field.MapIndex(key)
nameField := resource.Elem().FieldByName("Name")
if nameField.IsValid() && nameField.Kind() == reflect.String {
assert.True(
t,
strings.Contains(nameField.String(), "dev"),
"process_environment_mode should rename '%s' in '%s'",
key,
resources.Type().Field(i).Name,
)
}
}
}
}
}

@ -15,7 +15,7 @@ import (
)

func TestProcessInclude(t *testing.T) {
bundle := &bundle.Bundle{
b := &bundle.Bundle{
Config: config.Root{
Path: t.TempDir(),
Workspace: config.Workspace{

@ -25,14 +25,14 @@ func TestProcessInclude(t *testing.T) {
}

relPath := "./file.yml"
fullPath := filepath.Join(bundle.Config.Path, relPath)
fullPath := filepath.Join(b.Config.Path, relPath)
f, err := os.Create(fullPath)
require.NoError(t, err)
fmt.Fprint(f, "workspace:\n host: bar\n")
f.Close()

assert.Equal(t, "foo", bundle.Config.Workspace.Host)
assert.Equal(t, "foo", b.Config.Workspace.Host)
err = mutator.ProcessInclude(fullPath, relPath).Apply(context.Background(), bundle)
err = bundle.Apply(context.Background(), b, mutator.ProcessInclude(fullPath, relPath))
require.NoError(t, err)
assert.Equal(t, "bar", bundle.Config.Workspace.Host)
assert.Equal(t, "bar", b.Config.Workspace.Host)
}

@ -5,16 +5,17 @@ import (
"fmt"
"os"
"path/filepath"
"slices"
"strings"

"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"golang.org/x/exp/slices"
"github.com/databricks/cli/bundle/env"
)

// Get extra include paths from environment variable
func GetExtraIncludePaths() []string {
func getExtraIncludePaths(ctx context.Context) []string {
value, exists := os.LookupEnv(bundle.ExtraIncludePathsKey)
value, exists := env.Includes(ctx)
if !exists {
return nil
}

@ -48,7 +49,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error
var files []string

// Converts extra include paths from environment variable to relative paths
for _, extraIncludePath := range GetExtraIncludePaths() {
for _, extraIncludePath := range getExtraIncludePaths(ctx) {
if filepath.IsAbs(extraIncludePath) {
rel, err := filepath.Rel(b.Config.Path, extraIncludePath)
if err != nil {

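The loop above rewrites absolute extra include paths from the environment so they are relative to the bundle root. A standalone sketch of that path handling using the standard library's filepath.SplitList; the EXTRA_INCLUDES variable name is a placeholder here, not the CLI's real variable, which is read through the env.Includes helper:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// extraIncludesRelativeTo splits a PATH-like environment variable into
// individual include paths and makes absolute entries relative to root,
// mirroring the loop in processRootIncludes.
func extraIncludesRelativeTo(root string) ([]string, error) {
	value, exists := os.LookupEnv("EXTRA_INCLUDES")
	if !exists {
		return nil, nil
	}
	var out []string
	for _, p := range filepath.SplitList(value) { // splits on os.PathListSeparator
		if filepath.IsAbs(p) {
			rel, err := filepath.Rel(root, p)
			if err != nil {
				return nil, err
			}
			p = rel
		}
		out = append(out, p)
	}
	return out, nil
}

func main() {
	os.Setenv("EXTRA_INCLUDES", "/bundle/extra.yml")
	paths, _ := extraIncludesRelativeTo("/bundle")
	fmt.Println(paths) // [extra.yml]
}
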
@ -2,16 +2,17 @@ package mutator_test

import (
"context"
"fmt"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"testing"

"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/databricks/cli/bundle/env"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

@ -23,12 +24,12 @@ func touch(t *testing.T, path, file string) {
}

func TestProcessRootIncludesEmpty(t *testing.T) {
bundle := &bundle.Bundle{
b := &bundle.Bundle{
Config: config.Root{
Path: ".",
},
}
err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
require.NoError(t, err)
}

@ -40,7 +41,7 @@ func TestProcessRootIncludesAbs(t *testing.T) {
t.Skip("skipping temporarily to make windows unit tests green")
}

bundle := &bundle.Bundle{
b := &bundle.Bundle{
Config: config.Root{
Path: ".",
Include: []string{

@ -48,13 +49,13 @@ func TestProcessRootIncludesAbs(t *testing.T) {
},
},
}
err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
require.Error(t, err)
assert.Contains(t, err.Error(), "must be relative paths")
}

func TestProcessRootIncludesSingleGlob(t *testing.T) {
bundle := &bundle.Bundle{
b := &bundle.Bundle{
Config: config.Root{
Path: t.TempDir(),
Include: []string{

@ -63,18 +64,18 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) {
},
}

touch(t, bundle.Config.Path, "databricks.yml")
touch(t, b.Config.Path, "databricks.yml")
touch(t, bundle.Config.Path, "a.yml")
touch(t, b.Config.Path, "a.yml")
touch(t, bundle.Config.Path, "b.yml")
touch(t, b.Config.Path, "b.yml")

err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
require.NoError(t, err)

assert.Equal(t, []string{"a.yml", "b.yml"}, bundle.Config.Include)
assert.Equal(t, []string{"a.yml", "b.yml"}, b.Config.Include)
}

func TestProcessRootIncludesMultiGlob(t *testing.T) {
bundle := &bundle.Bundle{
b := &bundle.Bundle{
Config: config.Root{
Path: t.TempDir(),
Include: []string{

@ -84,17 +85,17 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) {
},
}

touch(t, bundle.Config.Path, "a1.yml")
touch(t, b.Config.Path, "a1.yml")
touch(t, bundle.Config.Path, "b1.yml")
touch(t, b.Config.Path, "b1.yml")

err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
require.NoError(t, err)

assert.Equal(t, []string{"a1.yml", "b1.yml"}, bundle.Config.Include)
assert.Equal(t, []string{"a1.yml", "b1.yml"}, b.Config.Include)
}

func TestProcessRootIncludesRemoveDups(t *testing.T) {
bundle := &bundle.Bundle{
b := &bundle.Bundle{
Config: config.Root{
Path: t.TempDir(),
Include: []string{

@ -104,15 +105,15 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) {
},
}

touch(t, bundle.Config.Path, "a.yml")
touch(t, b.Config.Path, "a.yml")

err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
require.NoError(t, err)
assert.Equal(t, []string{"a.yml"}, bundle.Config.Include)
assert.Equal(t, []string{"a.yml"}, b.Config.Include)
}

func TestProcessRootIncludesNotExists(t *testing.T) {
bundle := &bundle.Bundle{
b := &bundle.Bundle{
Config: config.Root{
Path: t.TempDir(),
Include: []string{

@ -120,7 +121,7 @@ func TestProcessRootIncludesNotExists(t *testing.T) {
},
},
}
err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
require.Error(t, err)
assert.Contains(t, err.Error(), "notexist.yml defined in 'include' section does not match any files")
}

@ -129,35 +130,38 @@ func TestProcessRootIncludesExtrasFromEnvVar(t *testing.T) {
rootPath := t.TempDir()
testYamlName := "extra_include_path.yml"
touch(t, rootPath, testYamlName)
os.Setenv(bundle.ExtraIncludePathsKey, path.Join(rootPath, testYamlName))
t.Setenv(env.IncludesVariable, path.Join(rootPath, testYamlName))
t.Cleanup(func() {
os.Unsetenv(bundle.ExtraIncludePathsKey)
})

bundle := &bundle.Bundle{
b := &bundle.Bundle{
Config: config.Root{
Path: rootPath,
},
}

err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
require.NoError(t, err)
assert.Contains(t, bundle.Config.Include, testYamlName)
assert.Contains(t, b.Config.Include, testYamlName)
}

func TestProcessRootIncludesDedupExtrasFromEnvVar(t *testing.T) {
rootPath := t.TempDir()
testYamlName := "extra_include_path.yml"
touch(t, rootPath, testYamlName)
t.Setenv(bundle.ExtraIncludePathsKey, fmt.Sprintf("%s%s%s", path.Join(rootPath, testYamlName), string(os.PathListSeparator), path.Join(rootPath, testYamlName)))
t.Setenv(env.IncludesVariable, strings.Join(
[]string{
path.Join(rootPath, testYamlName),
path.Join(rootPath, testYamlName),
},
string(os.PathListSeparator),
))

bundle := &bundle.Bundle{
b := &bundle.Bundle{
Config: config.Root{
Path: rootPath,
},
}

err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
require.NoError(t, err)
assert.Equal(t, []string{testYamlName}, bundle.Config.Include)
assert.Equal(t, []string{testYamlName}, b.Config.Include)
}

@ -8,21 +8,22 @@ import (

"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/databricks-sdk-go/service/iam"
"github.com/databricks/cli/libs/auth"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/ml"
)

type processEnvironmentMode struct{}
type processTargetMode struct{}

const developmentConcurrentRuns = 4

func ProcessEnvironmentMode() bundle.Mutator {
func ProcessTargetMode() bundle.Mutator {
return &processEnvironmentMode{}
return &processTargetMode{}
}

func (m *processEnvironmentMode) Name() string {
func (m *processTargetMode) Name() string {
return "ProcessEnvironmentMode"
return "ProcessTargetMode"
}

// Mark all resources as being for 'development' purposes, i.e.

@ -31,24 +32,32 @@ func (m *processEnvironmentMode) Name() string {
func transformDevelopmentMode(b *bundle.Bundle) error {
r := b.Config.Resources

prefix := "[dev " + b.Config.Workspace.CurrentUser.ShortName + "] "
shortName := b.Config.Workspace.CurrentUser.ShortName
prefix := "[dev " + shortName + "] "

// Generate a normalized version of the short name that can be used as a tag value.
tagValue := b.Tagging.NormalizeValue(shortName)

for i := range r.Jobs {
r.Jobs[i].Name = prefix + r.Jobs[i].Name
if r.Jobs[i].Tags == nil {
r.Jobs[i].Tags = make(map[string]string)
}
r.Jobs[i].Tags["dev"] = b.Config.Workspace.CurrentUser.DisplayName
r.Jobs[i].Tags["dev"] = tagValue
if r.Jobs[i].MaxConcurrentRuns == 0 {
r.Jobs[i].MaxConcurrentRuns = developmentConcurrentRuns
}
if r.Jobs[i].Schedule != nil {
// Pause each job. As an exception, we don't pause jobs that are explicitly
// marked as "unpaused". This allows users to override the default behavior
// of the development mode.
if r.Jobs[i].Schedule != nil && r.Jobs[i].Schedule.PauseStatus != jobs.PauseStatusUnpaused {
r.Jobs[i].Schedule.PauseStatus = jobs.PauseStatusPaused
}
if r.Jobs[i].Continuous != nil {
if r.Jobs[i].Continuous != nil && r.Jobs[i].Continuous.PauseStatus != jobs.PauseStatusUnpaused {
r.Jobs[i].Continuous.PauseStatus = jobs.PauseStatusPaused
}
if r.Jobs[i].Trigger != nil {
if r.Jobs[i].Trigger != nil && r.Jobs[i].Trigger.PauseStatus != jobs.PauseStatusUnpaused {
r.Jobs[i].Trigger.PauseStatus = jobs.PauseStatusPaused
}
}

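In development mode the mutator now defaults schedules, continuous jobs, and triggers to paused, but respects an explicit UNPAUSED override set by the user. A minimal standalone sketch of that rule, reusing the jobs types from the Databricks Go SDK that the mutator itself imports:

package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/jobs"
)

// pauseUnlessOverridden mirrors the condition in transformDevelopmentMode:
// default a schedule to PAUSED, but keep an explicit UNPAUSED override.
func pauseUnlessOverridden(s *jobs.CronSchedule) {
	if s != nil && s.PauseStatus != jobs.PauseStatusUnpaused {
		s.PauseStatus = jobs.PauseStatusPaused
	}
}

func main() {
	def := &jobs.CronSchedule{QuartzCronExpression: "* * * * *"}
	pauseUnlessOverridden(def)
	fmt.Println(def.PauseStatus) // PAUSED

	keep := &jobs.CronSchedule{PauseStatus: jobs.PauseStatusUnpaused}
	pauseUnlessOverridden(keep)
	fmt.Println(keep.PauseStatus) // UNPAUSED
}
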
@ -73,7 +82,19 @@ func transformDevelopmentMode(b *bundle.Bundle) error {
} else {
r.Experiments[i].Name = dir + "/" + prefix + base
}
r.Experiments[i].Tags = append(r.Experiments[i].Tags, ml.ExperimentTag{Key: "dev", Value: b.Config.Workspace.CurrentUser.DisplayName})
r.Experiments[i].Tags = append(r.Experiments[i].Tags, ml.ExperimentTag{Key: "dev", Value: tagValue})
}

for i := range r.ModelServingEndpoints {
prefix = "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_"
r.ModelServingEndpoints[i].Name = prefix + r.ModelServingEndpoints[i].Name
// (model serving doesn't yet support tags)
}

for i := range r.RegisteredModels {
prefix = "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_"
r.RegisteredModels[i].Name = prefix + r.RegisteredModels[i].Name
// (registered models in Unity Catalog don't yet support tags)
}

return nil

@ -99,25 +120,25 @@ func findIncorrectPath(b *bundle.Bundle, mode config.Mode) string {
if strings.Contains(b.Config.Workspace.StatePath, username) != containsExpected {
return "state_path"
}
if strings.Contains(b.Config.Workspace.FilesPath, username) != containsExpected {
if strings.Contains(b.Config.Workspace.FilePath, username) != containsExpected {
return "files_path"
return "file_path"
}
if strings.Contains(b.Config.Workspace.ArtifactsPath, username) != containsExpected {
if strings.Contains(b.Config.Workspace.ArtifactPath, username) != containsExpected {
return "artifacts_path"
return "artifact_path"
}
return ""
}

func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUsed bool) error {
if b.Config.Bundle.Git.Inferred {
env := b.Config.Bundle.Environment
env := b.Config.Bundle.Target
return fmt.Errorf("environment with 'mode: production' must specify an explicit 'environments.%s.git' configuration", env)
log.Warnf(ctx, "target with 'mode: production' should specify an explicit 'targets.%s.git' configuration", env)
}

r := b.Config.Resources
for i := range r.Pipelines {
if r.Pipelines[i].Development {
return fmt.Errorf("environment with 'mode: production' cannot specify a pipeline with 'development: true'")
return fmt.Errorf("target with 'mode: production' cannot specify a pipeline with 'development: true'")
}
}

@ -125,7 +146,7 @@ func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUs
if path := findIncorrectPath(b, config.Production); path != "" {
message := "%s must not contain the current username when using 'mode: production'"
if path == "root_path" {
return fmt.Errorf(message+"\n tip: set workspace.root_path to a shared path such as /Shared/.bundle/${bundle.name}/${bundle.environment}", path)
return fmt.Errorf(message+"\n tip: set workspace.root_path to a shared path such as /Shared/.bundle/${bundle.name}/${bundle.target}", path)
} else {
return fmt.Errorf(message, path)
}

@ -138,21 +159,6 @@ func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUs
return nil
}

// Determines whether a service principal identity is used to run the CLI.
func isServicePrincipalUsed(ctx context.Context, b *bundle.Bundle) (bool, error) {
ws := b.WorkspaceClient()

// Check if a principal with the current user's ID exists.
// We need to use the ListAll method since Get is only usable by admins.
matches, err := ws.ServicePrincipals.ListAll(ctx, iam.ListServicePrincipalsRequest{
Filter: "id eq " + b.Config.Workspace.CurrentUser.Id,
})
if err != nil {
return false, err
}
return len(matches) > 0, nil
}

// Determines whether run_as is explicitly set for all resources.
// We do this in a best-effort fashion rather than check the top-level
// 'run_as' field because the latter is not required to be set.

@ -165,7 +171,7 @@ func isRunAsSet(r config.Resources) bool {
return true
}

func (m *processEnvironmentMode) Apply(ctx context.Context, b *bundle.Bundle) error {
func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) error {
switch b.Config.Bundle.Mode {
case config.Development:
err := validateDevelopmentMode(b)

@ -174,15 +180,12 @@ func (m *processEnvironmentMode) Apply(ctx context.Context, b *bundle.Bundle) er
}
return transformDevelopmentMode(b)
case config.Production:
isPrincipal, err := isServicePrincipalUsed(ctx, b)
isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.UserName)
if err != nil {
return err
}
return validateProductionMode(ctx, b, isPrincipal)
case "":
// No action
default:
return fmt.Errorf("unsupported value specified for 'mode': %s", b.Config.Bundle.Mode)
return fmt.Errorf("unsupported value '%s' specified for 'mode': must be either 'development' or 'production'", b.Config.Bundle.Mode)
}

return nil

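The change replaces the SCIM ListAll round-trip with a local check on the current user name via auth.IsServicePrincipal. A plausible sketch of such a check, under the assumption that service principal identities are UUIDs while human users sign in with email addresses; the real libs/auth helper may differ, and github.com/google/uuid is an assumed dependency for this sketch:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

// isServicePrincipal guesses the identity type from its name: service
// principals use a UUID-shaped application ID as their user name, while
// human users have an email address. The UUID heuristic is an assumption.
func isServicePrincipal(userName string) bool {
	_, err := uuid.Parse(userName)
	return err == nil
}

func main() {
	fmt.Println(isServicePrincipal("8a7b6c5d-1234-4e5f-9a0b-c1d2e3f4a5b6")) // true
	fmt.Println(isServicePrincipal("lennart@company.com"))                  // false
}
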
@ -0,0 +1,302 @@
package mutator

import (
"context"
"reflect"
"strings"
"testing"

"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/libs/tags"
sdkconfig "github.com/databricks/databricks-sdk-go/config"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/databricks/databricks-sdk-go/service/iam"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/ml"
"github.com/databricks/databricks-sdk-go/service/pipelines"
"github.com/databricks/databricks-sdk-go/service/serving"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func mockBundle(mode config.Mode) *bundle.Bundle {
return &bundle.Bundle{
Config: config.Root{
Bundle: config.Bundle{
Mode: mode,
Git: config.Git{
OriginURL: "http://origin",
Branch: "main",
},
},
Workspace: config.Workspace{
CurrentUser: &config.User{
ShortName: "lennart",
User: &iam.User{
UserName: "lennart@company.com",
Id: "1",
},
},
StatePath: "/Users/lennart@company.com/.bundle/x/y/state",
ArtifactPath: "/Users/lennart@company.com/.bundle/x/y/artifacts",
FilePath: "/Users/lennart@company.com/.bundle/x/y/files",
},
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job1": {
JobSettings: &jobs.JobSettings{
Name: "job1",
Schedule: &jobs.CronSchedule{
QuartzCronExpression: "* * * * *",
},
},
},
"job2": {
JobSettings: &jobs.JobSettings{
Name: "job2",
Schedule: &jobs.CronSchedule{
QuartzCronExpression: "* * * * *",
PauseStatus: jobs.PauseStatusUnpaused,
},
},
},
"job3": {
JobSettings: &jobs.JobSettings{
Name: "job3",
Trigger: &jobs.TriggerSettings{
FileArrival: &jobs.FileArrivalTriggerConfiguration{
Url: "test.com",
},
},
},
},
"job4": {
JobSettings: &jobs.JobSettings{
Name: "job4",
Continuous: &jobs.Continuous{
PauseStatus: jobs.PauseStatusPaused,
},
},
},
},
Pipelines: map[string]*resources.Pipeline{
"pipeline1": {PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1"}},
},
Experiments: map[string]*resources.MlflowExperiment{
"experiment1": {Experiment: &ml.Experiment{Name: "/Users/lennart.kats@databricks.com/experiment1"}},
"experiment2": {Experiment: &ml.Experiment{Name: "experiment2"}},
},
Models: map[string]*resources.MlflowModel{
"model1": {Model: &ml.Model{Name: "model1"}},
},
ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{
"servingendpoint1": {CreateServingEndpoint: &serving.CreateServingEndpoint{Name: "servingendpoint1"}},
},
RegisteredModels: map[string]*resources.RegisteredModel{
"registeredmodel1": {CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{Name: "registeredmodel1"}},
},
},
},
// Use AWS implementation for testing.
Tagging: tags.ForCloud(&sdkconfig.Config{
Host: "https://company.cloud.databricks.com",
}),
}
}

func TestProcessTargetModeDevelopment(t *testing.T) {
b := mockBundle(config.Development)

m := ProcessTargetMode()
err := bundle.Apply(context.Background(), b, m)
require.NoError(t, err)

// Job 1
assert.Equal(t, "[dev lennart] job1", b.Config.Resources.Jobs["job1"].Name)
assert.Equal(t, b.Config.Resources.Jobs["job1"].Tags["dev"], "lennart")
assert.Equal(t, b.Config.Resources.Jobs["job1"].Schedule.PauseStatus, jobs.PauseStatusPaused)

// Job 2
assert.Equal(t, "[dev lennart] job2", b.Config.Resources.Jobs["job2"].Name)
assert.Equal(t, b.Config.Resources.Jobs["job2"].Tags["dev"], "lennart")
assert.Equal(t, b.Config.Resources.Jobs["job2"].Schedule.PauseStatus, jobs.PauseStatusUnpaused)

// Pipeline 1
assert.Equal(t, "[dev lennart] pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name)
assert.True(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)

// Experiment 1
assert.Equal(t, "/Users/lennart.kats@databricks.com/[dev lennart] experiment1", b.Config.Resources.Experiments["experiment1"].Name)
assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Experiment.Tags, ml.ExperimentTag{Key: "dev", Value: "lennart"})
assert.Equal(t, "dev", b.Config.Resources.Experiments["experiment1"].Experiment.Tags[0].Key)

// Experiment 2
assert.Equal(t, "[dev lennart] experiment2", b.Config.Resources.Experiments["experiment2"].Name)
assert.Contains(t, b.Config.Resources.Experiments["experiment2"].Experiment.Tags, ml.ExperimentTag{Key: "dev", Value: "lennart"})

// Model 1
assert.Equal(t, "[dev lennart] model1", b.Config.Resources.Models["model1"].Name)

// Model serving endpoint 1
assert.Equal(t, "dev_lennart_servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)

// Registered model 1
assert.Equal(t, "dev_lennart_registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
}

func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) {
b := mockBundle(config.Development)
b.Tagging = tags.ForCloud(&sdkconfig.Config{
Host: "https://dbc-XXXXXXXX-YYYY.cloud.databricks.com/",
})

b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
err := bundle.Apply(context.Background(), b, ProcessTargetMode())
require.NoError(t, err)

// Assert that tag normalization took place.
assert.Equal(t, "Hello world__", b.Config.Resources.Jobs["job1"].Tags["dev"])
}

func TestProcessTargetModeDevelopmentTagNormalizationForAzure(t *testing.T) {
b := mockBundle(config.Development)
b.Tagging = tags.ForCloud(&sdkconfig.Config{
Host: "https://adb-xxx.y.azuredatabricks.net/",
})

b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
err := bundle.Apply(context.Background(), b, ProcessTargetMode())
require.NoError(t, err)

// Assert that tag normalization took place (Azure allows more characters than AWS).
assert.Equal(t, "Héllö wörld?!", b.Config.Resources.Jobs["job1"].Tags["dev"])
}

func TestProcessTargetModeDevelopmentTagNormalizationForGcp(t *testing.T) {
b := mockBundle(config.Development)
b.Tagging = tags.ForCloud(&sdkconfig.Config{
Host: "https://123.4.gcp.databricks.com/",
})

b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
err := bundle.Apply(context.Background(), b, ProcessTargetMode())
require.NoError(t, err)

// Assert that tag normalization took place.
assert.Equal(t, "Hello_world", b.Config.Resources.Jobs["job1"].Tags["dev"])
}

func TestProcessTargetModeDefault(t *testing.T) {
b := mockBundle("")

m := ProcessTargetMode()
err := bundle.Apply(context.Background(), b, m)
require.NoError(t, err)
assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name)
assert.Equal(t, "pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name)
assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
}

func TestProcessTargetModeProduction(t *testing.T) {
b := mockBundle(config.Production)

err := validateProductionMode(context.Background(), b, false)
require.ErrorContains(t, err, "state_path")

b.Config.Workspace.StatePath = "/Shared/.bundle/x/y/state"
b.Config.Workspace.ArtifactPath = "/Shared/.bundle/x/y/artifacts"
b.Config.Workspace.FilePath = "/Shared/.bundle/x/y/files"

err = validateProductionMode(context.Background(), b, false)
require.ErrorContains(t, err, "production")

permissions := []resources.Permission{
{
Level: "CAN_MANAGE",
UserName: "user@company.com",
},
}
b.Config.Resources.Jobs["job1"].Permissions = permissions
b.Config.Resources.Jobs["job1"].RunAs = &jobs.JobRunAs{UserName: "user@company.com"}
b.Config.Resources.Jobs["job2"].RunAs = &jobs.JobRunAs{UserName: "user@company.com"}
b.Config.Resources.Jobs["job3"].RunAs = &jobs.JobRunAs{UserName: "user@company.com"}
b.Config.Resources.Jobs["job4"].RunAs = &jobs.JobRunAs{UserName: "user@company.com"}
b.Config.Resources.Pipelines["pipeline1"].Permissions = permissions
b.Config.Resources.Experiments["experiment1"].Permissions = permissions
b.Config.Resources.Experiments["experiment2"].Permissions = permissions
b.Config.Resources.Models["model1"].Permissions = permissions
b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Permissions = permissions

err = validateProductionMode(context.Background(), b, false)
require.NoError(t, err)

assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name)
assert.Equal(t, "pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name)
assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
}

func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) {
b := mockBundle(config.Production)

// Our target has all kinds of problems when not using service principals ...
err := validateProductionMode(context.Background(), b, false)
require.Error(t, err)

// ... but we're much less strict when a principal is used
err = validateProductionMode(context.Background(), b, true)
require.NoError(t, err)
}

// Make sure that we have test coverage for all resource types
func TestAllResourcesMocked(t *testing.T) {
b := mockBundle(config.Development)
resources := reflect.ValueOf(b.Config.Resources)

for i := 0; i < resources.NumField(); i++ {
field := resources.Field(i)
if field.Kind() == reflect.Map {
assert.True(
t,
!field.IsNil() && field.Len() > 0,
"process_target_mode should support '%s' (please add it to process_target_mode.go and extend the test suite)",
resources.Type().Field(i).Name,
)
}
}
}

// Make sure that we at least rename all resources
func TestAllResourcesRenamed(t *testing.T) {
b := mockBundle(config.Development)
resources := reflect.ValueOf(b.Config.Resources)

m := ProcessTargetMode()
err := bundle.Apply(context.Background(), b, m)
require.NoError(t, err)

for i := 0; i < resources.NumField(); i++ {
field := resources.Field(i)

if field.Kind() == reflect.Map {
for _, key := range field.MapKeys() {
resource := field.MapIndex(key)
nameField := resource.Elem().FieldByName("Name")
if nameField.IsValid() && nameField.Kind() == reflect.String {
assert.True(
t,
strings.Contains(nameField.String(), "dev"),
"process_target_mode should rename '%s' in '%s'",
key,
resources.Type().Field(i).Name,
)
}
}
}
}
}

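The three normalization tests pin down per-cloud tag rules: AWS folds diacritics and keeps spaces, Azure accepts the value unchanged, and GCP is the strictest. A small driver using the same tags.ForCloud / NormalizeValue API exercised by the mutator above, with the expected outputs taken from the test assertions:

package main

import (
	"fmt"

	"github.com/databricks/cli/libs/tags"
	sdkconfig "github.com/databricks/databricks-sdk-go/config"
)

func main() {
	// The cloud is inferred from the workspace host, exactly as in mockBundle.
	gcp := tags.ForCloud(&sdkconfig.Config{Host: "https://123.4.gcp.databricks.com/"})
	aws := tags.ForCloud(&sdkconfig.Config{Host: "https://dbc-XXXXXXXX-YYYY.cloud.databricks.com/"})

	// Per the tests above, each cloud applies its own tag-value rules.
	fmt.Println(gcp.NormalizeValue("Héllö wörld?!")) // "Hello_world" (per the GCP test)
	fmt.Println(aws.NormalizeValue("Héllö wörld?!")) // "Hello world__" (per the AWS test)
}
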
@ -0,0 +1,65 @@
package mutator

import (
"context"
"slices"

"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go/service/jobs"
)

type setRunAs struct {
}

// SetRunAs mutator is used to go over defined resources such as Jobs and DLT Pipelines
// and set the correct execution identity ("run_as" for a job or "is_owner" permission for DLT)
// if a top-level "run_as" section is defined in the configuration.
func SetRunAs() bundle.Mutator {
return &setRunAs{}
}

func (m *setRunAs) Name() string {
return "SetRunAs"
}

func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) error {
runAs := b.Config.RunAs
if runAs == nil {
return nil
}

for i := range b.Config.Resources.Jobs {
job := b.Config.Resources.Jobs[i]
if job.RunAs != nil {
continue
}
job.RunAs = &jobs.JobRunAs{
ServicePrincipalName: runAs.ServicePrincipalName,
UserName: runAs.UserName,
}
}

me := b.Config.Workspace.CurrentUser.UserName
// If the user deploying the bundle is the same as the one defined in run_as,
// do not add the IS_OWNER permission; the current user is implied to be an owner in this case.
// Otherwise the deploy would fail due to this bug: https://github.com/databricks/terraform-provider-databricks/issues/2407
if runAs.UserName == me || runAs.ServicePrincipalName == me {
return nil
}

for i := range b.Config.Resources.Pipelines {
pipeline := b.Config.Resources.Pipelines[i]
pipeline.Permissions = slices.DeleteFunc(pipeline.Permissions, func(p resources.Permission) bool {
return (runAs.ServicePrincipalName != "" && p.ServicePrincipalName == runAs.ServicePrincipalName) ||
(runAs.UserName != "" && p.UserName == runAs.UserName)
})
pipeline.Permissions = append(pipeline.Permissions, resources.Permission{
Level: "IS_OWNER",
ServicePrincipalName: runAs.ServicePrincipalName,
UserName: runAs.UserName,
})
}

return nil
}

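The pipeline loop relies on slices.DeleteFunc (standard library since Go 1.21, matching the import change elsewhere in this commit) to drop any permission entry that duplicates the run_as identity before appending a single IS_OWNER grant. A standalone sketch of that replace-then-append pattern with a simplified permission type:

package main

import (
	"fmt"
	"slices"
)

type permission struct {
	Level    string
	UserName string
}

func main() {
	runAsUser := "user@company.com"
	perms := []permission{
		{Level: "CAN_MANAGE", UserName: runAsUser},
		{Level: "CAN_VIEW", UserName: "other@company.com"},
	}

	// Remove any existing grant for the run_as identity...
	perms = slices.DeleteFunc(perms, func(p permission) bool {
		return p.UserName == runAsUser
	})
	// ...then add it back exactly once as the owner.
	perms = append(perms, permission{Level: "IS_OWNER", UserName: runAsUser})

	fmt.Println(perms) // [{CAN_VIEW other@company.com} {IS_OWNER user@company.com}]
}
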
@ -1,54 +0,0 @@
package mutator

import (
"context"
"fmt"
"strings"

"github.com/databricks/cli/bundle"
"golang.org/x/exp/maps"
)

type selectDefaultEnvironment struct{}

// SelectDefaultEnvironment merges the default environment into the root configuration.
func SelectDefaultEnvironment() bundle.Mutator {
return &selectDefaultEnvironment{}
}

func (m *selectDefaultEnvironment) Name() string {
return "SelectDefaultEnvironment"
}

func (m *selectDefaultEnvironment) Apply(ctx context.Context, b *bundle.Bundle) error {
if len(b.Config.Environments) == 0 {
return fmt.Errorf("no environments defined")
}

// One environment means there's only one default.
names := maps.Keys(b.Config.Environments)
if len(names) == 1 {
return SelectEnvironment(names[0]).Apply(ctx, b)
}

// Multiple environments means we look for the `default` flag.
var defaults []string
for name, env := range b.Config.Environments {
if env != nil && env.Default {
defaults = append(defaults, name)
}
}

// It is invalid to have multiple environments with the `default` flag set.
if len(defaults) > 1 {
return fmt.Errorf("multiple environments are marked as default (%s)", strings.Join(defaults, ", "))
}

// If no environment has the `default` flag set, ask the user to specify one.
if len(defaults) == 0 {
return fmt.Errorf("please specify environment")
}

// One default remaining.
return SelectEnvironment(defaults[0]).Apply(ctx, b)
}

@ -1,90 +0,0 @@
package mutator_test

import (
"context"
"testing"

"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/stretchr/testify/assert"
)

func TestSelectDefaultEnvironmentNoEnvironments(t *testing.T) {
bundle := &bundle.Bundle{
Config: config.Root{
Environments: map[string]*config.Environment{},
},
}
err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
assert.ErrorContains(t, err, "no environments defined")
}

func TestSelectDefaultEnvironmentSingleEnvironments(t *testing.T) {
bundle := &bundle.Bundle{
Config: config.Root{
Environments: map[string]*config.Environment{
"foo": {},
},
},
}
err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
assert.NoError(t, err)
assert.Equal(t, "foo", bundle.Config.Bundle.Environment)
}

func TestSelectDefaultEnvironmentNoDefaults(t *testing.T) {
bundle := &bundle.Bundle{
Config: config.Root{
Environments: map[string]*config.Environment{
"foo": {},
"bar": {},
"qux": {},
},
},
}
err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
assert.ErrorContains(t, err, "please specify environment")
}

func TestSelectDefaultEnvironmentNoDefaultsWithNil(t *testing.T) {
bundle := &bundle.Bundle{
Config: config.Root{
Environments: map[string]*config.Environment{
"foo": nil,
"bar": nil,
},
},
}
err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
assert.ErrorContains(t, err, "please specify environment")
}

func TestSelectDefaultEnvironmentMultipleDefaults(t *testing.T) {
bundle := &bundle.Bundle{
Config: config.Root{
Environments: map[string]*config.Environment{
"foo": {Default: true},
"bar": {Default: true},
"qux": {Default: true},
},
},
}
err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
assert.ErrorContains(t, err, "multiple environments are marked as default")
}

func TestSelectDefaultEnvironmentSingleDefault(t *testing.T) {
bundle := &bundle.Bundle{
Config: config.Root{
Environments: map[string]*config.Environment{
"foo": {},
"bar": {Default: true},
"qux": {},
},
},
}
err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
assert.NoError(t, err)
assert.Equal(t, "bar", bundle.Config.Bundle.Environment)
}

@ -0,0 +1,54 @@
package mutator

import (
"context"
"fmt"
"strings"

"github.com/databricks/cli/bundle"
"golang.org/x/exp/maps"
)

type selectDefaultTarget struct{}

// SelectDefaultTarget merges the default target into the root configuration.
func SelectDefaultTarget() bundle.Mutator {
return &selectDefaultTarget{}
}

func (m *selectDefaultTarget) Name() string {
return "SelectDefaultTarget"
}

func (m *selectDefaultTarget) Apply(ctx context.Context, b *bundle.Bundle) error {
if len(b.Config.Targets) == 0 {
return fmt.Errorf("no targets defined")
}

// One target means there's only one default.
names := maps.Keys(b.Config.Targets)
if len(names) == 1 {
return bundle.Apply(ctx, b, SelectTarget(names[0]))
}

// Multiple targets means we look for the `default` flag.
var defaults []string
for name, env := range b.Config.Targets {
if env != nil && env.Default {
defaults = append(defaults, name)
}
}

// It is invalid to have multiple targets with the `default` flag set.
if len(defaults) > 1 {
return fmt.Errorf("multiple targets are marked as default (%s)", strings.Join(defaults, ", "))
}

// If no target has the `default` flag set, ask the user to specify one.
if len(defaults) == 0 {
return fmt.Errorf("please specify target")
}

// One default remaining.
return bundle.Apply(ctx, b, SelectTarget(defaults[0]))
}

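The selection rule is: a single target wins outright; otherwise exactly one target must carry the default flag. A standalone sketch of that decision, with a plain struct standing in for config.Target:

package main

import (
	"fmt"
	"strings"
)

type target struct{ Default bool }

// pickDefault mirrors selectDefaultTarget.Apply: one target is implicitly
// the default; otherwise exactly one `default: true` entry must exist.
func pickDefault(targets map[string]*target) (string, error) {
	if len(targets) == 0 {
		return "", fmt.Errorf("no targets defined")
	}
	if len(targets) == 1 {
		for name := range targets {
			return name, nil
		}
	}
	var defaults []string
	for name, t := range targets {
		if t != nil && t.Default {
			defaults = append(defaults, name)
		}
	}
	if len(defaults) > 1 {
		return "", fmt.Errorf("multiple targets are marked as default (%s)", strings.Join(defaults, ", "))
	}
	if len(defaults) == 0 {
		return "", fmt.Errorf("please specify target")
	}
	return defaults[0], nil
}

func main() {
	name, err := pickDefault(map[string]*target{"dev": {}, "prod": {Default: true}})
	fmt.Println(name, err) // prod <nil>
}
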
@ -0,0 +1,90 @@
package mutator_test

import (
"context"
"testing"

"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/stretchr/testify/assert"
)

func TestSelectDefaultTargetNoTargets(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Targets: map[string]*config.Target{},
},
}
err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
assert.ErrorContains(t, err, "no targets defined")
}

func TestSelectDefaultTargetSingleTargets(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Targets: map[string]*config.Target{
"foo": {},
},
},
}
err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
assert.NoError(t, err)
assert.Equal(t, "foo", b.Config.Bundle.Target)
}

func TestSelectDefaultTargetNoDefaults(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Targets: map[string]*config.Target{
"foo": {},
"bar": {},
"qux": {},
},
},
}
err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
assert.ErrorContains(t, err, "please specify target")
}

func TestSelectDefaultTargetNoDefaultsWithNil(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Targets: map[string]*config.Target{
"foo": nil,
"bar": nil,
},
},
}
err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
assert.ErrorContains(t, err, "please specify target")
}

func TestSelectDefaultTargetMultipleDefaults(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Targets: map[string]*config.Target{
"foo": {Default: true},
"bar": {Default: true},
"qux": {Default: true},
},
},
}
err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
assert.ErrorContains(t, err, "multiple targets are marked as default")
}

func TestSelectDefaultTargetSingleDefault(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Targets: map[string]*config.Target{
"foo": {},
"bar": {Default: true},
"qux": {},
},
},
}
err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget())
assert.NoError(t, err)
assert.Equal(t, "bar", b.Config.Bundle.Target)
}

@ -1,48 +0,0 @@
package mutator

import (
	"context"
	"fmt"

	"github.com/databricks/cli/bundle"
)

type selectEnvironment struct {
	name string
}

// SelectEnvironment merges the specified environment into the root configuration.
func SelectEnvironment(name string) bundle.Mutator {
	return &selectEnvironment{
		name: name,
	}
}

func (m *selectEnvironment) Name() string {
	return fmt.Sprintf("SelectEnvironment(%s)", m.name)
}

func (m *selectEnvironment) Apply(_ context.Context, b *bundle.Bundle) error {
	if b.Config.Environments == nil {
		return fmt.Errorf("no environments defined")
	}

	// Get specified environment
	env, ok := b.Config.Environments[m.name]
	if !ok {
		return fmt.Errorf("%s: no such environment", m.name)
	}

	// Merge specified environment into root configuration structure.
	err := b.Config.MergeEnvironment(env)
	if err != nil {
		return err
	}

	// Store specified environment in configuration for reference.
	b.Config.Bundle.Environment = m.name

	// Clear environments after loading.
	b.Config.Environments = nil
	return nil
}
@ -0,0 +1,56 @@
package mutator

import (
	"context"
	"fmt"
	"strings"

	"github.com/databricks/cli/bundle"
	"golang.org/x/exp/maps"
)

type selectTarget struct {
	name string
}

// SelectTarget merges the specified target into the root configuration.
func SelectTarget(name string) bundle.Mutator {
	return &selectTarget{
		name: name,
	}
}

func (m *selectTarget) Name() string {
	return fmt.Sprintf("SelectTarget(%s)", m.name)
}

func (m *selectTarget) Apply(_ context.Context, b *bundle.Bundle) error {
	if b.Config.Targets == nil {
		return fmt.Errorf("no targets defined")
	}

	// Get specified target
	target, ok := b.Config.Targets[m.name]
	if !ok {
		return fmt.Errorf("%s: no such target. Available targets: %s", m.name, strings.Join(maps.Keys(b.Config.Targets), ", "))
	}

	// Merge specified target into root configuration structure.
	err := b.Config.MergeTargetOverrides(target)
	if err != nil {
		return err
	}

	// Store specified target in configuration for reference.
	b.Config.Bundle.Target = m.name

	// We do this for backward compatibility.
	// TODO: remove when Environments section is not supported anymore.
	b.Config.Bundle.Environment = b.Config.Bundle.Target

	// Clear targets after loading.
	b.Config.Targets = nil
	b.Config.Environments = nil

	return nil
}
@ -11,13 +11,13 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-func TestSelectEnvironment(t *testing.T) {
-	bundle := &bundle.Bundle{
+func TestSelectTarget(t *testing.T) {
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Workspace: config.Workspace{
 				Host: "foo",
 			},
-			Environments: map[string]*config.Environment{
+			Targets: map[string]*config.Target{
 				"default": {
 					Workspace: &config.Workspace{
 						Host: "bar",
@ -26,19 +26,19 @@ func TestSelectEnvironment(t *testing.T) {
 			},
 		},
 	}
-	err := mutator.SelectEnvironment("default").Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, mutator.SelectTarget("default"))
 	require.NoError(t, err)
-	assert.Equal(t, "bar", bundle.Config.Workspace.Host)
+	assert.Equal(t, "bar", b.Config.Workspace.Host)
 }
 
-func TestSelectEnvironmentNotFound(t *testing.T) {
-	bundle := &bundle.Bundle{
+func TestSelectTargetNotFound(t *testing.T) {
+	b := &bundle.Bundle{
 		Config: config.Root{
-			Environments: map[string]*config.Environment{
+			Targets: map[string]*config.Target{
 				"default": {},
 			},
 		},
 	}
-	err := mutator.SelectEnvironment("doesnt-exist").Apply(context.Background(), bundle)
-	require.Error(t, err, "no environments defined")
+	err := bundle.Apply(context.Background(), b, mutator.SelectTarget("doesnt-exist"))
+	require.Error(t, err, "no targets defined")
 }
@ -3,10 +3,10 @@ package mutator
 import (
 	"context"
 	"fmt"
-	"os"
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config/variable"
+	"github.com/databricks/cli/libs/env"
 )
 
 const bundleVarPrefix = "BUNDLE_VAR_"
@ -21,7 +21,7 @@ func (m *setVariables) Name() string {
 	return "SetVariables"
 }
 
-func setVariable(v *variable.Variable, name string) error {
+func setVariable(ctx context.Context, v *variable.Variable, name string) error {
 	// case: variable already has value initialized, so skip
 	if v.HasValue() {
 		return nil
@ -29,7 +29,7 @@ func setVariable(v *variable.Variable, name string) error {
 
 	// case: read and set variable value from process environment
 	envVarName := bundleVarPrefix + name
-	if val, ok := os.LookupEnv(envVarName); ok {
+	if val, ok := env.Lookup(ctx, envVarName); ok {
 		err := v.Set(val)
 		if err != nil {
 			return fmt.Errorf(`failed to assign value "%s" to variable %s from environment variable %s with error: %w`, val, name, envVarName, err)
@ -54,7 +54,7 @@ func setVariable(v *variable.Variable, name string) error {
 
 func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) error {
 	for name, variable := range b.Config.Variables {
-		err := setVariable(variable, name)
+		err := setVariable(ctx, variable, name)
 		if err != nil {
 			return err
 		}
@ -21,7 +21,7 @@ func TestSetVariableFromProcessEnvVar(t *testing.T) {
 	// set value for variable as an environment variable
 	t.Setenv("BUNDLE_VAR_foo", "process-env")
 
-	err := setVariable(&variable, "foo")
+	err := setVariable(context.Background(), &variable, "foo")
 	require.NoError(t, err)
 	assert.Equal(t, *variable.Value, "process-env")
 }
@ -33,7 +33,7 @@ func TestSetVariableUsingDefaultValue(t *testing.T) {
 		Default: &defaultVal,
 	}
 
-	err := setVariable(&variable, "foo")
+	err := setVariable(context.Background(), &variable, "foo")
 	require.NoError(t, err)
 	assert.Equal(t, *variable.Value, "default")
 }
@ -49,7 +49,7 @@ func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) {
 
 	// since a value is already assigned to the variable, it would not be overridden
 	// by the default value
-	err := setVariable(&variable, "foo")
+	err := setVariable(context.Background(), &variable, "foo")
 	require.NoError(t, err)
 	assert.Equal(t, *variable.Value, "assigned-value")
 }
@ -68,7 +68,7 @@ func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) {
 
 	// since a value is already assigned to the variable, it would not be overridden
 	// by the value from environment
-	err := setVariable(&variable, "foo")
+	err := setVariable(context.Background(), &variable, "foo")
 	require.NoError(t, err)
 	assert.Equal(t, *variable.Value, "assigned-value")
 }
@ -79,7 +79,7 @@ func TestSetVariablesErrorsIfAValueCouldNotBeResolved(t *testing.T) {
 	}
 
 	// fails because we could not resolve a value for the variable
-	err := setVariable(&variable, "foo")
+	err := setVariable(context.Background(), &variable, "foo")
 	assert.ErrorContains(t, err, "no value assigned to required variable foo. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_foo environment variable")
 }
 
@ -87,7 +87,7 @@ func TestSetVariablesMutator(t *testing.T) {
 	defaultValForA := "default-a"
 	defaultValForB := "default-b"
 	valForC := "assigned-val-c"
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Variables: map[string]*variable.Variable{
 				"a": {
@ -108,9 +108,9 @@ func TestSetVariablesMutator(t *testing.T) {
 
 	t.Setenv("BUNDLE_VAR_b", "env-var-b")
 
-	err := SetVariables().Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, SetVariables())
 	require.NoError(t, err)
-	assert.Equal(t, "default-a", *bundle.Config.Variables["a"].Value)
-	assert.Equal(t, "env-var-b", *bundle.Config.Variables["b"].Value)
-	assert.Equal(t, "assigned-val-c", *bundle.Config.Variables["c"].Value)
+	assert.Equal(t, "default-a", *b.Config.Variables["a"].Value)
+	assert.Equal(t, "env-var-b", *b.Config.Variables["b"].Value)
+	assert.Equal(t, "assigned-val-c", *b.Config.Variables["c"].Value)
 }
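For orientation, the resolution order that setVariable applies is: an already-assigned value wins, then the BUNDLE_VAR_<name> environment variable, then the declared default; otherwise the variable is unresolved and an error. A minimal standalone sketch of that precedence (illustrative only; it uses os.LookupEnv directly rather than the context-aware libs/env lookup this commit introduces):

package main

import (
	"fmt"
	"os"
)

// resolve sketches the precedence used by setVariable: assigned value first,
// then BUNDLE_VAR_<name> from the environment, then the declared default.
func resolve(assigned, def *string, name string) (string, error) {
	if assigned != nil {
		return *assigned, nil
	}
	if val, ok := os.LookupEnv("BUNDLE_VAR_" + name); ok {
		return val, nil
	}
	if def != nil {
		return *def, nil
	}
	return "", fmt.Errorf("no value assigned to required variable %s", name)
}

func main() {
	os.Setenv("BUNDLE_VAR_foo", "from-env")
	v, err := resolve(nil, nil, "foo")
	fmt.Println(v, err) // prints: from-env <nil>
}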
@ -0,0 +1,100 @@
package mutator

import (
	"context"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"text/template"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/databricks-sdk-go/service/jobs"
)

type TaskWithJobKey struct {
	Task   *jobs.Task
	JobKey string
}

type TrampolineFunctions interface {
	GetTemplateData(task *jobs.Task) (map[string]any, error)
	GetTasks(b *bundle.Bundle) []TaskWithJobKey
	CleanUp(task *jobs.Task) error
}

type trampoline struct {
	name      string
	functions TrampolineFunctions
	template  string
}

func NewTrampoline(
	name string,
	functions TrampolineFunctions,
	template string,
) *trampoline {
	return &trampoline{name, functions, template}
}

func (m *trampoline) Name() string {
	return fmt.Sprintf("trampoline(%s)", m.name)
}

func (m *trampoline) Apply(ctx context.Context, b *bundle.Bundle) error {
	tasks := m.functions.GetTasks(b)
	for _, task := range tasks {
		err := m.generateNotebookWrapper(ctx, b, task)
		if err != nil {
			return err
		}
	}
	return nil
}

func (m *trampoline) generateNotebookWrapper(ctx context.Context, b *bundle.Bundle, task TaskWithJobKey) error {
	internalDir, err := b.InternalDir(ctx)
	if err != nil {
		return err
	}

	notebookName := fmt.Sprintf("notebook_%s_%s", task.JobKey, task.Task.TaskKey)
	localNotebookPath := filepath.Join(internalDir, notebookName+".py")

	err = os.MkdirAll(filepath.Dir(localNotebookPath), 0755)
	if err != nil {
		return err
	}

	f, err := os.Create(localNotebookPath)
	if err != nil {
		return err
	}
	defer f.Close()

	data, err := m.functions.GetTemplateData(task.Task)
	if err != nil {
		return err
	}

	t, err := template.New(notebookName).Parse(m.template)
	if err != nil {
		return err
	}

	internalDirRel, err := filepath.Rel(b.Config.Path, internalDir)
	if err != nil {
		return err
	}

	err = m.functions.CleanUp(task.Task)
	if err != nil {
		return err
	}
	remotePath := path.Join(b.Config.Workspace.FilePath, filepath.ToSlash(internalDirRel), notebookName)

	task.Task.NotebookTask = &jobs.NotebookTask{
		NotebookPath: remotePath,
	}

	return t.Execute(f, data)
}
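The wrapper generation above boils down to rendering a Go text/template with per-task data and writing the result into the bundle's internal directory. A reduced, runnable sketch of just the rendering step (standard library only; the file creation and remote path wiring are omitted):

package main

import (
	"os"
	"text/template"
)

func main() {
	// Parse the trampoline template and render it with per-task data,
	// as generateNotebookWrapper does before writing the notebook file.
	t := template.Must(template.New("wrapper").Parse("Hello from {{.MyName}}\n"))
	data := map[string]any{"MyName": "Trampoline"}
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}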
@ -0,0 +1,98 @@
package mutator

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/paths"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/stretchr/testify/require"
)

type functions struct{}

func (f *functions) GetTasks(b *bundle.Bundle) []TaskWithJobKey {
	tasks := make([]TaskWithJobKey, 0)
	for k := range b.Config.Resources.Jobs["test"].Tasks {
		tasks = append(tasks, TaskWithJobKey{
			JobKey: "test",
			Task:   &b.Config.Resources.Jobs["test"].Tasks[k],
		})
	}

	return tasks
}

func (f *functions) GetTemplateData(task *jobs.Task) (map[string]any, error) {
	if task.PythonWheelTask == nil {
		return nil, fmt.Errorf("PythonWheelTask cannot be nil")
	}

	data := make(map[string]any)
	data["MyName"] = "Trampoline"
	return data, nil
}

func (f *functions) CleanUp(task *jobs.Task) error {
	task.PythonWheelTask = nil
	return nil
}

func TestGenerateTrampoline(t *testing.T) {
	tmpDir := t.TempDir()

	tasks := []jobs.Task{
		{
			TaskKey: "to_trampoline",
			PythonWheelTask: &jobs.PythonWheelTask{
				PackageName: "test",
				EntryPoint:  "run",
			}},
	}

	b := &bundle.Bundle{
		Config: config.Root{
			Path: tmpDir,
			Bundle: config.Bundle{
				Target: "development",
			},
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"test": {
						Paths: paths.Paths{
							ConfigFilePath: tmpDir,
						},
						JobSettings: &jobs.JobSettings{
							Tasks: tasks,
						},
					},
				},
			},
		},
	}
	ctx := context.Background()

	funcs := functions{}
	trampoline := NewTrampoline("test_trampoline", &funcs, "Hello from {{.MyName}}")
	err := bundle.Apply(ctx, b, trampoline)
	require.NoError(t, err)

	dir, err := b.InternalDir(ctx)
	require.NoError(t, err)
	filename := filepath.Join(dir, "notebook_test_to_trampoline.py")

	bytes, err := os.ReadFile(filename)
	require.NoError(t, err)

	require.Equal(t, "Hello from Trampoline", string(bytes))

	task := b.Config.Resources.Jobs["test"].Tasks[0]
	require.Equal(t, task.NotebookTask.NotebookPath, ".databricks/bundle/development/.internal/notebook_test_to_trampoline")
	require.Nil(t, task.PythonWheelTask)
}
@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"net/url"
 	"os"
 	"path"
 	"path/filepath"
@ -11,8 +12,6 @@ import (
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/libs/notebook"
-	"github.com/databricks/databricks-sdk-go/service/jobs"
-	"github.com/databricks/databricks-sdk-go/service/pipelines"
 )
 
 type ErrIsNotebook struct {
@ -44,7 +43,9 @@ func (m *translatePaths) Name() string {
 	return "TranslatePaths"
 }
 
-// rewritePath converts a given relative path to a stable remote workspace path.
+type rewriteFunc func(literal, localFullPath, localRelPath, remotePath string) (string, error)
+
+// rewritePath converts a given relative path from the loaded config to a new path based on the passed rewriting function
 //
 // It takes these arguments:
 //   - The argument `dir` is the directory relative to which the given relative path is.
@ -57,13 +58,23 @@ func (m *translatePaths) rewritePath(
 	dir string,
 	b *bundle.Bundle,
 	p *string,
-	fn func(literal, localPath, remotePath string) (string, error),
+	fn rewriteFunc,
 ) error {
 	// We assume absolute paths point to a location in the workspace
 	if path.IsAbs(filepath.ToSlash(*p)) {
 		return nil
 	}
 
+	url, err := url.Parse(*p)
+	if err != nil {
+		return err
+	}
+
+	// If the file path has scheme, it's a full path and we don't need to transform it
+	if url.Scheme != "" {
+		return nil
+	}
+
 	// Local path is relative to the directory the resource was defined in.
 	localPath := filepath.Join(dir, filepath.FromSlash(*p))
 	if interp, ok := m.seen[localPath]; ok {
@ -72,19 +83,19 @@ func (m *translatePaths) rewritePath(
 	}
 
 	// Remote path must be relative to the bundle root.
-	remotePath, err := filepath.Rel(b.Config.Path, localPath)
+	localRelPath, err := filepath.Rel(b.Config.Path, localPath)
 	if err != nil {
 		return err
 	}
-	if strings.HasPrefix(remotePath, "..") {
+	if strings.HasPrefix(localRelPath, "..") {
 		return fmt.Errorf("path %s is not contained in bundle root path", localPath)
 	}
 
 	// Prefix remote path with its remote root path.
-	remotePath = path.Join(b.Config.Workspace.FilesPath, filepath.ToSlash(remotePath))
+	remotePath := path.Join(b.Config.Workspace.FilePath, filepath.ToSlash(localRelPath))
 
 	// Convert local path into workspace path via specified function.
-	interp, err := fn(*p, localPath, filepath.ToSlash(remotePath))
+	interp, err := fn(*p, localPath, localRelPath, filepath.ToSlash(remotePath))
 	if err != nil {
 		return err
 	}
@ -94,81 +105,80 @@ func (m *translatePaths) rewritePath(
 	return nil
 }
 
-func (m *translatePaths) translateNotebookPath(literal, localPath, remotePath string) (string, error) {
-	nb, _, err := notebook.Detect(localPath)
+func translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
+	nb, _, err := notebook.Detect(localFullPath)
 	if os.IsNotExist(err) {
 		return "", fmt.Errorf("notebook %s not found", literal)
 	}
 	if err != nil {
-		return "", fmt.Errorf("unable to determine if %s is a notebook: %w", localPath, err)
+		return "", fmt.Errorf("unable to determine if %s is a notebook: %w", localFullPath, err)
 	}
 	if !nb {
-		return "", ErrIsNotNotebook{localPath}
+		return "", ErrIsNotNotebook{localFullPath}
 	}
 
 	// Upon import, notebooks are stripped of their extension.
-	return strings.TrimSuffix(remotePath, filepath.Ext(localPath)), nil
+	return strings.TrimSuffix(remotePath, filepath.Ext(localFullPath)), nil
 }
 
-func (m *translatePaths) translateFilePath(literal, localPath, remotePath string) (string, error) {
-	nb, _, err := notebook.Detect(localPath)
+func translateFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
+	nb, _, err := notebook.Detect(localFullPath)
 	if os.IsNotExist(err) {
 		return "", fmt.Errorf("file %s not found", literal)
 	}
 	if err != nil {
-		return "", fmt.Errorf("unable to determine if %s is not a notebook: %w", localPath, err)
+		return "", fmt.Errorf("unable to determine if %s is not a notebook: %w", localFullPath, err)
 	}
 	if nb {
-		return "", ErrIsNotebook{localPath}
+		return "", ErrIsNotebook{localFullPath}
 	}
 	return remotePath, nil
 }
 
-func (m *translatePaths) translateJobTask(dir string, b *bundle.Bundle, task *jobs.Task) error {
-	var err error
-
-	if task.NotebookTask != nil {
-		err = m.rewritePath(dir, b, &task.NotebookTask.NotebookPath, m.translateNotebookPath)
-		if target := (&ErrIsNotNotebook{}); errors.As(err, target) {
-			return fmt.Errorf(`expected a notebook for "tasks.notebook_task.notebook_path" but got a file: %w`, target)
-		}
-		if err != nil {
-			return err
-		}
-	}
-
-	if task.SparkPythonTask != nil {
-		err = m.rewritePath(dir, b, &task.SparkPythonTask.PythonFile, m.translateFilePath)
-		if target := (&ErrIsNotebook{}); errors.As(err, target) {
-			return fmt.Errorf(`expected a file for "tasks.spark_python_task.python_file" but got a notebook: %w`, target)
-		}
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (m *translatePaths) translatePipelineLibrary(dir string, b *bundle.Bundle, library *pipelines.PipelineLibrary) error {
-	var err error
-
-	if library.Notebook != nil {
-		err = m.rewritePath(dir, b, &library.Notebook.Path, m.translateNotebookPath)
-		if target := (&ErrIsNotNotebook{}); errors.As(err, target) {
-			return fmt.Errorf(`expected a notebook for "libraries.notebook.path" but got a file: %w`, target)
-		}
-		if err != nil {
-			return err
-		}
-	}
-
-	if library.File != nil {
-		err = m.rewritePath(dir, b, &library.File.Path, m.translateFilePath)
-		if target := (&ErrIsNotebook{}); errors.As(err, target) {
-			return fmt.Errorf(`expected a file for "libraries.file.path" but got a notebook: %w`, target)
-		}
-		if err != nil {
-			return err
-		}
-	}
+func translateDirectoryPath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
+	info, err := os.Stat(localFullPath)
+	if err != nil {
+		return "", err
+	}
+	if !info.IsDir() {
+		return "", fmt.Errorf("%s is not a directory", localFullPath)
+	}
+	return remotePath, nil
+}
+
+func translateNoOp(literal, localFullPath, localRelPath, remotePath string) (string, error) {
+	return localRelPath, nil
+}
+
+type transformer struct {
+	// A directory path relative to which `path` will be transformed
+	dir string
+	// A path to transform
+	path *string
+	// Name of the config property where the path string is coming from
+	configPath string
+	// A function that performs the actual rewriting logic.
+	fn rewriteFunc
+}
+
+type transformFunc func(resource any, dir string) *transformer
+
+// Apply all matches transformers for the given resource
+func (m *translatePaths) applyTransformers(funcs []transformFunc, b *bundle.Bundle, resource any, dir string) error {
+	for _, transformFn := range funcs {
+		transformer := transformFn(resource, dir)
+		if transformer == nil {
+			continue
+		}
+
+		err := m.rewritePath(transformer.dir, b, transformer.path, transformer.fn)
+		if err != nil {
+			if target := (&ErrIsNotebook{}); errors.As(err, target) {
+				return fmt.Errorf(`expected a file for "%s" but got a notebook: %w`, transformer.configPath, target)
+			}
+			if target := (&ErrIsNotNotebook{}); errors.As(err, target) {
+				return fmt.Errorf(`expected a notebook for "%s" but got a file: %w`, transformer.configPath, target)
+			}
+			return err
+		}
+	}
+
+	return nil
+}
@ -179,38 +189,16 @@ func (m *translatePaths) translatePipelineLibrary(dir string, b *bundle.Bundle,
 func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) error {
 	m.seen = make(map[string]string)
 
-	for key, job := range b.Config.Resources.Jobs {
-		dir, err := job.ConfigFileDirectory()
-		if err != nil {
-			return fmt.Errorf("unable to determine directory for job %s: %w", key, err)
-		}
-
-		// Do not translate job task paths if using git source
-		if job.GitSource != nil {
-			continue
-		}
-
-		for i := 0; i < len(job.Tasks); i++ {
-			err := m.translateJobTask(dir, b, &job.Tasks[i])
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	for key, pipeline := range b.Config.Resources.Pipelines {
-		dir, err := pipeline.ConfigFileDirectory()
-		if err != nil {
-			return fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err)
-		}
-
-		for i := 0; i < len(pipeline.Libraries); i++ {
-			err := m.translatePipelineLibrary(dir, b, &pipeline.Libraries[i])
-			if err != nil {
-				return err
-			}
-		}
-	}
-
+	for _, fn := range []func(*translatePaths, *bundle.Bundle) error{
+		applyJobTransformers,
+		applyPipelineTransformers,
+		applyArtifactTransformers,
+	} {
+		err := fn(m, b)
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
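One detail worth noting in the rewritePath change above: a path is now left untouched both when it is absolute and when it parses with a URL scheme (for example dbfs:/...). A small standalone sketch of that classification, assuming the same stdlib calls the commit uses:

package main

import (
	"fmt"
	"net/url"
	"path"
	"path/filepath"
)

// needsTranslation mirrors the early-exit checks in rewritePath: absolute
// workspace paths and scheme-qualified paths (dbfs:/...) pass through as-is.
func needsTranslation(p string) (bool, error) {
	if path.IsAbs(filepath.ToSlash(p)) {
		return false, nil
	}
	u, err := url.Parse(p)
	if err != nil {
		return false, err
	}
	if u.Scheme != "" {
		return false, nil
	}
	return true, nil
}

func main() {
	for _, p := range []string{"./nb.py", "/Workspace/nb.py", "dbfs:/bundle/dist/task_remote.jar"} {
		ok, _ := needsTranslation(p)
		fmt.Printf("%-40s translate=%v\n", p, ok)
	}
}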
@ -0,0 +1,42 @@
package mutator

import (
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
)

func transformArtifactPath(resource any, dir string) *transformer {
	artifact, ok := resource.(*config.Artifact)
	if !ok {
		return nil
	}

	return &transformer{
		dir,
		&artifact.Path,
		"artifacts.path",
		translateNoOp,
	}
}

func applyArtifactTransformers(m *translatePaths, b *bundle.Bundle) error {
	artifactTransformers := []transformFunc{
		transformArtifactPath,
	}

	for key, artifact := range b.Config.Artifacts {
		dir, err := artifact.ConfigFileDirectory()
		if err != nil {
			return fmt.Errorf("unable to determine directory for artifact %s: %w", key, err)
		}

		err = m.applyTransformers(artifactTransformers, b, artifact, dir)
		if err != nil {
			return err
		}
	}

	return nil
}
@ -0,0 +1,133 @@
package mutator

import (
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func transformNotebookTask(resource any, dir string) *transformer {
	task, ok := resource.(*jobs.Task)
	if !ok || task.NotebookTask == nil {
		return nil
	}

	return &transformer{
		dir,
		&task.NotebookTask.NotebookPath,
		"tasks.notebook_task.notebook_path",
		translateNotebookPath,
	}
}

func transformSparkTask(resource any, dir string) *transformer {
	task, ok := resource.(*jobs.Task)
	if !ok || task.SparkPythonTask == nil {
		return nil
	}

	return &transformer{
		dir,
		&task.SparkPythonTask.PythonFile,
		"tasks.spark_python_task.python_file",
		translateFilePath,
	}
}

func transformWhlLibrary(resource any, dir string) *transformer {
	library, ok := resource.(*compute.Library)
	if !ok || library.Whl == "" {
		return nil
	}

	return &transformer{
		dir,
		&library.Whl,
		"libraries.whl",
		translateNoOp, // Does not convert to remote path but makes sure that nested paths resolved correctly
	}
}

func transformDbtTask(resource any, dir string) *transformer {
	task, ok := resource.(*jobs.Task)
	if !ok || task.DbtTask == nil {
		return nil
	}

	return &transformer{
		dir,
		&task.DbtTask.ProjectDirectory,
		"tasks.dbt_task.project_directory",
		translateDirectoryPath,
	}
}

func transformSqlFileTask(resource any, dir string) *transformer {
	task, ok := resource.(*jobs.Task)
	if !ok || task.SqlTask == nil || task.SqlTask.File == nil {
		return nil
	}

	return &transformer{
		dir,
		&task.SqlTask.File.Path,
		"tasks.sql_task.file.path",
		translateFilePath,
	}
}

func transformJarLibrary(resource any, dir string) *transformer {
	library, ok := resource.(*compute.Library)
	if !ok || library.Jar == "" {
		return nil
	}

	return &transformer{
		dir,
		&library.Jar,
		"libraries.jar",
		translateNoOp, // Does not convert to remote path but makes sure that nested paths resolved correctly
	}
}

func applyJobTransformers(m *translatePaths, b *bundle.Bundle) error {
	jobTransformers := []transformFunc{
		transformNotebookTask,
		transformSparkTask,
		transformWhlLibrary,
		transformJarLibrary,
		transformDbtTask,
		transformSqlFileTask,
	}

	for key, job := range b.Config.Resources.Jobs {
		dir, err := job.ConfigFileDirectory()
		if err != nil {
			return fmt.Errorf("unable to determine directory for job %s: %w", key, err)
		}

		// Do not translate job task paths if using git source
		if job.GitSource != nil {
			continue
		}

		for i := 0; i < len(job.Tasks); i++ {
			task := &job.Tasks[i]
			err := m.applyTransformers(jobTransformers, b, task, dir)
			if err != nil {
				return err
			}
			for j := 0; j < len(task.Libraries); j++ {
				library := &task.Libraries[j]
				err := m.applyTransformers(jobTransformers, b, library, dir)
				if err != nil {
					return err
				}
			}
		}
	}

	return nil
}
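The transformer functions above all follow the same dispatch shape: inspect the resource, return nil when the transform does not apply, and otherwise hand back a pointer to the path that should be rewritten. A stripped-down sketch of that pattern with made-up stand-in types (notebookTask here is hypothetical, not the SDK type):

package main

import "fmt"

type transformer struct {
	path       *string
	configPath string
}

type notebookTask struct{ NotebookPath string }

// transformNotebook returns nil for resources it does not handle,
// mirroring the nil-means-skip convention used by applyTransformers.
func transformNotebook(resource any) *transformer {
	task, ok := resource.(*notebookTask)
	if !ok {
		return nil
	}
	return &transformer{&task.NotebookPath, "tasks.notebook_task.notebook_path"}
}

func main() {
	task := &notebookTask{NotebookPath: "./nb.py"}
	for _, fn := range []func(any) *transformer{transformNotebook} {
		t := fn(task)
		if t == nil {
			continue
		}
		*t.path = "/Workspace/files/nb" // stand-in for the real rewrite
		fmt.Println(t.configPath, "->", task.NotebookPath)
	}
}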
@ -0,0 +1,60 @@
package mutator

import (
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/databricks-sdk-go/service/pipelines"
)

func transformLibraryNotebook(resource any, dir string) *transformer {
	library, ok := resource.(*pipelines.PipelineLibrary)
	if !ok || library.Notebook == nil {
		return nil
	}

	return &transformer{
		dir,
		&library.Notebook.Path,
		"libraries.notebook.path",
		translateNotebookPath,
	}
}

func transformLibraryFile(resource any, dir string) *transformer {
	library, ok := resource.(*pipelines.PipelineLibrary)
	if !ok || library.File == nil {
		return nil
	}

	return &transformer{
		dir,
		&library.File.Path,
		"libraries.file.path",
		translateFilePath,
	}
}

func applyPipelineTransformers(m *translatePaths, b *bundle.Bundle) error {
	pipelineTransformers := []transformFunc{
		transformLibraryNotebook,
		transformLibraryFile,
	}

	for key, pipeline := range b.Config.Resources.Pipelines {
		dir, err := pipeline.ConfigFileDirectory()
		if err != nil {
			return fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err)
		}

		for i := 0; i < len(pipeline.Libraries); i++ {
			library := &pipeline.Libraries[i]
			err := m.applyTransformers(pipelineTransformers, b, library, dir)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
@ -9,7 +9,9 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/databricks/cli/bundle/config/paths"
 	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/databricks-sdk-go/service/compute"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/databricks/databricks-sdk-go/service/pipelines"
 	"github.com/stretchr/testify/assert"
@ -33,17 +35,17 @@ func touchEmptyFile(t *testing.T, path string) {
 
 func TestTranslatePathsSkippedWithGitSource(t *testing.T) {
 	dir := t.TempDir()
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Path: dir,
 			Workspace: config.Workspace{
-				FilesPath: "/bundle",
+				FilePath: "/bundle",
 			},
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
 					"job": {
 
-						Paths: resources.Paths{
+						Paths: paths.Paths{
 							ConfigFilePath: filepath.Join(dir, "resource.yml"),
 						},
 						JobSettings: &jobs.JobSettings{
@ -78,23 +80,23 @@ func TestTranslatePathsSkippedWithGitSource(t *testing.T) {
 		},
 	}
 
-	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	require.NoError(t, err)
 
 	assert.Equal(
 		t,
 		"my_job_notebook.py",
-		bundle.Config.Resources.Jobs["job"].Tasks[0].NotebookTask.NotebookPath,
+		b.Config.Resources.Jobs["job"].Tasks[0].NotebookTask.NotebookPath,
 	)
 	assert.Equal(
 		t,
 		"foo",
-		bundle.Config.Resources.Jobs["job"].Tasks[1].PythonWheelTask.PackageName,
+		b.Config.Resources.Jobs["job"].Tasks[1].PythonWheelTask.PackageName,
 	)
 	assert.Equal(
 		t,
 		"my_python_file.py",
-		bundle.Config.Resources.Jobs["job"].Tasks[2].SparkPythonTask.PythonFile,
+		b.Config.Resources.Jobs["job"].Tasks[2].SparkPythonTask.PythonFile,
 	)
 }
 
@ -103,17 +105,18 @@ func TestTranslatePaths(t *testing.T) {
 	touchNotebookFile(t, filepath.Join(dir, "my_job_notebook.py"))
 	touchNotebookFile(t, filepath.Join(dir, "my_pipeline_notebook.py"))
 	touchEmptyFile(t, filepath.Join(dir, "my_python_file.py"))
+	touchEmptyFile(t, filepath.Join(dir, "dist", "task.jar"))
 
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Path: dir,
 			Workspace: config.Workspace{
-				FilesPath: "/bundle",
+				FilePath: "/bundle",
 			},
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
 					"job": {
-						Paths: resources.Paths{
+						Paths: paths.Paths{
 							ConfigFilePath: filepath.Join(dir, "resource.yml"),
 						},
 						JobSettings: &jobs.JobSettings{
@ -122,6 +125,9 @@ func TestTranslatePaths(t *testing.T) {
 					NotebookTask: &jobs.NotebookTask{
 						NotebookPath: "./my_job_notebook.py",
 					},
+					Libraries: []compute.Library{
+						{Whl: "./dist/task.whl"},
+					},
 				},
 				{
 					NotebookTask: &jobs.NotebookTask{
@ -143,13 +149,29 @@ func TestTranslatePaths(t *testing.T) {
 						PythonFile: "./my_python_file.py",
 					},
 				},
+				{
+					SparkJarTask: &jobs.SparkJarTask{
+						MainClassName: "HelloWorld",
+					},
+					Libraries: []compute.Library{
+						{Jar: "./dist/task.jar"},
+					},
+				},
+				{
+					SparkJarTask: &jobs.SparkJarTask{
+						MainClassName: "HelloWorldRemote",
+					},
+					Libraries: []compute.Library{
+						{Jar: "dbfs:/bundle/dist/task_remote.jar"},
+					},
+				},
 			},
 		},
 	},
 },
 Pipelines: map[string]*resources.Pipeline{
 	"pipeline": {
-		Paths: resources.Paths{
+		Paths: paths.Paths{
 			ConfigFilePath: filepath.Join(dir, "resource.yml"),
 		},
 		PipelineSpec: &pipelines.PipelineSpec{
@ -185,69 +207,87 @@ func TestTranslatePaths(t *testing.T) {
 		},
 	}
 
-	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	require.NoError(t, err)
 
 	// Assert that the path in the tasks now refer to the artifact.
 	assert.Equal(
 		t,
 		"/bundle/my_job_notebook",
-		bundle.Config.Resources.Jobs["job"].Tasks[0].NotebookTask.NotebookPath,
+		b.Config.Resources.Jobs["job"].Tasks[0].NotebookTask.NotebookPath,
+	)
+	assert.Equal(
+		t,
+		filepath.Join("dist", "task.whl"),
+		b.Config.Resources.Jobs["job"].Tasks[0].Libraries[0].Whl,
 	)
 	assert.Equal(
 		t,
 		"/Users/jane.doe@databricks.com/doesnt_exist.py",
-		bundle.Config.Resources.Jobs["job"].Tasks[1].NotebookTask.NotebookPath,
+		b.Config.Resources.Jobs["job"].Tasks[1].NotebookTask.NotebookPath,
 	)
 	assert.Equal(
 		t,
 		"/bundle/my_job_notebook",
-		bundle.Config.Resources.Jobs["job"].Tasks[2].NotebookTask.NotebookPath,
+		b.Config.Resources.Jobs["job"].Tasks[2].NotebookTask.NotebookPath,
 	)
 	assert.Equal(
 		t,
 		"/bundle/my_python_file.py",
-		bundle.Config.Resources.Jobs["job"].Tasks[4].SparkPythonTask.PythonFile,
+		b.Config.Resources.Jobs["job"].Tasks[4].SparkPythonTask.PythonFile,
+	)
+	assert.Equal(
+		t,
+		filepath.Join("dist", "task.jar"),
+		b.Config.Resources.Jobs["job"].Tasks[5].Libraries[0].Jar,
+	)
+	assert.Equal(
+		t,
+		"dbfs:/bundle/dist/task_remote.jar",
+		b.Config.Resources.Jobs["job"].Tasks[6].Libraries[0].Jar,
 	)
 
 	// Assert that the path in the libraries now refer to the artifact.
 	assert.Equal(
 		t,
 		"/bundle/my_pipeline_notebook",
-		bundle.Config.Resources.Pipelines["pipeline"].Libraries[0].Notebook.Path,
+		b.Config.Resources.Pipelines["pipeline"].Libraries[0].Notebook.Path,
 	)
 	assert.Equal(
 		t,
 		"/Users/jane.doe@databricks.com/doesnt_exist.py",
-		bundle.Config.Resources.Pipelines["pipeline"].Libraries[1].Notebook.Path,
+		b.Config.Resources.Pipelines["pipeline"].Libraries[1].Notebook.Path,
 	)
 	assert.Equal(
 		t,
 		"/bundle/my_pipeline_notebook",
-		bundle.Config.Resources.Pipelines["pipeline"].Libraries[2].Notebook.Path,
+		b.Config.Resources.Pipelines["pipeline"].Libraries[2].Notebook.Path,
 	)
 	assert.Equal(
 		t,
 		"/bundle/my_python_file.py",
-		bundle.Config.Resources.Pipelines["pipeline"].Libraries[4].File.Path,
+		b.Config.Resources.Pipelines["pipeline"].Libraries[4].File.Path,
 	)
 }
 
 func TestTranslatePathsInSubdirectories(t *testing.T) {
 	dir := t.TempDir()
 	touchEmptyFile(t, filepath.Join(dir, "job", "my_python_file.py"))
+	touchEmptyFile(t, filepath.Join(dir, "job", "dist", "task.jar"))
 	touchEmptyFile(t, filepath.Join(dir, "pipeline", "my_python_file.py"))
+	touchEmptyFile(t, filepath.Join(dir, "job", "my_sql_file.sql"))
+	touchEmptyFile(t, filepath.Join(dir, "job", "my_dbt_project", "dbt_project.yml"))
 
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Path: dir,
 			Workspace: config.Workspace{
-				FilesPath: "/bundle",
+				FilePath: "/bundle",
 			},
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
 					"job": {
-						Paths: resources.Paths{
+						Paths: paths.Paths{
 							ConfigFilePath: filepath.Join(dir, "job/resource.yml"),
 						},
 						JobSettings: &jobs.JobSettings{
@ -257,13 +297,33 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
 						PythonFile: "./my_python_file.py",
 					},
 				},
+				{
+					SparkJarTask: &jobs.SparkJarTask{
+						MainClassName: "HelloWorld",
+					},
+					Libraries: []compute.Library{
+						{Jar: "./dist/task.jar"},
+					},
+				},
+				{
+					SqlTask: &jobs.SqlTask{
+						File: &jobs.SqlTaskFile{
+							Path: "./my_sql_file.sql",
+						},
+					},
+				},
+				{
+					DbtTask: &jobs.DbtTask{
+						ProjectDirectory: "./my_dbt_project",
+					},
+				},
 			},
 		},
 	},
 },
 Pipelines: map[string]*resources.Pipeline{
 	"pipeline": {
-		Paths: resources.Paths{
+		Paths: paths.Paths{
 			ConfigFilePath: filepath.Join(dir, "pipeline/resource.yml"),
 		},
@ -282,35 +342,50 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
 		},
 	}
 
-	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	require.NoError(t, err)
 
 	assert.Equal(
 		t,
 		"/bundle/job/my_python_file.py",
-		bundle.Config.Resources.Jobs["job"].Tasks[0].SparkPythonTask.PythonFile,
+		b.Config.Resources.Jobs["job"].Tasks[0].SparkPythonTask.PythonFile,
+	)
+	assert.Equal(
+		t,
+		filepath.Join("job", "dist", "task.jar"),
+		b.Config.Resources.Jobs["job"].Tasks[1].Libraries[0].Jar,
+	)
+	assert.Equal(
+		t,
+		"/bundle/job/my_sql_file.sql",
+		b.Config.Resources.Jobs["job"].Tasks[2].SqlTask.File.Path,
+	)
+	assert.Equal(
+		t,
+		"/bundle/job/my_dbt_project",
+		b.Config.Resources.Jobs["job"].Tasks[3].DbtTask.ProjectDirectory,
 	)
 
 	assert.Equal(
 		t,
 		"/bundle/pipeline/my_python_file.py",
-		bundle.Config.Resources.Pipelines["pipeline"].Libraries[0].File.Path,
+		b.Config.Resources.Pipelines["pipeline"].Libraries[0].File.Path,
 	)
 }
 
 func TestTranslatePathsOutsideBundleRoot(t *testing.T) {
 	dir := t.TempDir()
 
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Path: dir,
 			Workspace: config.Workspace{
-				FilesPath: "/bundle",
+				FilePath: "/bundle",
 			},
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
 					"job": {
-						Paths: resources.Paths{
+						Paths: paths.Paths{
 							ConfigFilePath: filepath.Join(dir, "../resource.yml"),
 						},
 						JobSettings: &jobs.JobSettings{
@ -328,20 +403,20 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) {
 		},
 	}
 
-	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.ErrorContains(t, err, "is not contained in bundle root")
 }
 
 func TestJobNotebookDoesNotExistError(t *testing.T) {
 	dir := t.TempDir()
 
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Path: dir,
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
 					"job": {
-						Paths: resources.Paths{
+						Paths: paths.Paths{
 							ConfigFilePath: filepath.Join(dir, "fake.yml"),
 						},
 						JobSettings: &jobs.JobSettings{
@ -359,20 +434,20 @@ func TestJobNotebookDoesNotExistError(t *testing.T) {
 		},
 	}
 
-	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.EqualError(t, err, "notebook ./doesnt_exist.py not found")
 }
 
 func TestJobFileDoesNotExistError(t *testing.T) {
 	dir := t.TempDir()
 
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Path: dir,
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
 					"job": {
-						Paths: resources.Paths{
+						Paths: paths.Paths{
 							ConfigFilePath: filepath.Join(dir, "fake.yml"),
 						},
 						JobSettings: &jobs.JobSettings{
@ -390,20 +465,20 @@ func TestJobFileDoesNotExistError(t *testing.T) {
 		},
 	}
 
-	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.EqualError(t, err, "file ./doesnt_exist.py not found")
 }
 
 func TestPipelineNotebookDoesNotExistError(t *testing.T) {
 	dir := t.TempDir()
 
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Path: dir,
 			Resources: config.Resources{
 				Pipelines: map[string]*resources.Pipeline{
 					"pipeline": {
-						Paths: resources.Paths{
+						Paths: paths.Paths{
 							ConfigFilePath: filepath.Join(dir, "fake.yml"),
 						},
 						PipelineSpec: &pipelines.PipelineSpec{
@ -421,20 +496,20 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) {
 		},
 	}
 
-	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.EqualError(t, err, "notebook ./doesnt_exist.py not found")
 }
 
 func TestPipelineFileDoesNotExistError(t *testing.T) {
 	dir := t.TempDir()
 
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Path: dir,
 			Resources: config.Resources{
 				Pipelines: map[string]*resources.Pipeline{
 					"pipeline": {
-						Paths: resources.Paths{
+						Paths: paths.Paths{
 							ConfigFilePath: filepath.Join(dir, "fake.yml"),
 						},
 						PipelineSpec: &pipelines.PipelineSpec{
@ -452,7 +527,7 @@ func TestPipelineFileDoesNotExistError(t *testing.T) {
 		},
 	}
 
-	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.EqualError(t, err, "file ./doesnt_exist.py not found")
 }
 
@ -460,16 +535,16 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) {
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
touchNotebookFile(t, filepath.Join(dir, "my_notebook.py"))
|
touchNotebookFile(t, filepath.Join(dir, "my_notebook.py"))
|
||||||
|
|
||||||
bundle := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Path: dir,
|
Path: dir,
|
||||||
Workspace: config.Workspace{
|
Workspace: config.Workspace{
|
||||||
FilesPath: "/bundle",
|
FilePath: "/bundle",
|
||||||
},
|
},
|
||||||
Resources: config.Resources{
|
Resources: config.Resources{
|
||||||
Jobs: map[string]*resources.Job{
|
Jobs: map[string]*resources.Job{
|
||||||
"job": {
|
"job": {
|
||||||
Paths: resources.Paths{
|
Paths: paths.Paths{
|
||||||
ConfigFilePath: filepath.Join(dir, "resource.yml"),
|
ConfigFilePath: filepath.Join(dir, "resource.yml"),
|
||||||
},
|
},
|
||||||
JobSettings: &jobs.JobSettings{
|
JobSettings: &jobs.JobSettings{
|
||||||
|
@ -487,7 +562,7 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := mutator.TranslatePaths().Apply(context.Background(), bundle)
|
err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
||||||
assert.ErrorContains(t, err, `expected a file for "tasks.spark_python_task.python_file" but got a notebook`)
|
assert.ErrorContains(t, err, `expected a file for "tasks.spark_python_task.python_file" but got a notebook`)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -495,16 +570,16 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) {
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
touchEmptyFile(t, filepath.Join(dir, "my_file.py"))
|
touchEmptyFile(t, filepath.Join(dir, "my_file.py"))
|
||||||
|
|
||||||
bundle := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Path: dir,
|
Path: dir,
|
||||||
Workspace: config.Workspace{
|
Workspace: config.Workspace{
|
||||||
FilesPath: "/bundle",
|
FilePath: "/bundle",
|
||||||
},
|
},
|
||||||
Resources: config.Resources{
|
Resources: config.Resources{
|
||||||
Jobs: map[string]*resources.Job{
|
Jobs: map[string]*resources.Job{
|
||||||
"job": {
|
"job": {
|
||||||
Paths: resources.Paths{
|
Paths: paths.Paths{
|
||||||
ConfigFilePath: filepath.Join(dir, "resource.yml"),
|
ConfigFilePath: filepath.Join(dir, "resource.yml"),
|
||||||
},
|
},
|
||||||
JobSettings: &jobs.JobSettings{
|
JobSettings: &jobs.JobSettings{
|
||||||
|
@ -522,7 +597,7 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := mutator.TranslatePaths().Apply(context.Background(), bundle)
|
err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
||||||
assert.ErrorContains(t, err, `expected a notebook for "tasks.notebook_task.notebook_path" but got a file`)
|
assert.ErrorContains(t, err, `expected a notebook for "tasks.notebook_task.notebook_path" but got a file`)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -530,16 +605,16 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) {
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
touchEmptyFile(t, filepath.Join(dir, "my_file.py"))
|
touchEmptyFile(t, filepath.Join(dir, "my_file.py"))
|
||||||
|
|
||||||
bundle := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Path: dir,
|
Path: dir,
|
||||||
Workspace: config.Workspace{
|
Workspace: config.Workspace{
|
||||||
FilesPath: "/bundle",
|
FilePath: "/bundle",
|
||||||
},
|
},
|
||||||
Resources: config.Resources{
|
Resources: config.Resources{
|
||||||
Pipelines: map[string]*resources.Pipeline{
|
Pipelines: map[string]*resources.Pipeline{
|
||||||
"pipeline": {
|
"pipeline": {
|
||||||
Paths: resources.Paths{
|
Paths: paths.Paths{
|
||||||
ConfigFilePath: filepath.Join(dir, "resource.yml"),
|
ConfigFilePath: filepath.Join(dir, "resource.yml"),
|
||||||
},
|
},
|
||||||
PipelineSpec: &pipelines.PipelineSpec{
|
PipelineSpec: &pipelines.PipelineSpec{
|
||||||
|
@ -557,7 +632,7 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := mutator.TranslatePaths().Apply(context.Background(), bundle)
|
err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
||||||
assert.ErrorContains(t, err, `expected a notebook for "libraries.notebook.path" but got a file`)
|
assert.ErrorContains(t, err, `expected a notebook for "libraries.notebook.path" but got a file`)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -565,16 +640,16 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) {
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
touchNotebookFile(t, filepath.Join(dir, "my_notebook.py"))
|
touchNotebookFile(t, filepath.Join(dir, "my_notebook.py"))
|
||||||
|
|
||||||
bundle := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Path: dir,
|
Path: dir,
|
||||||
Workspace: config.Workspace{
|
Workspace: config.Workspace{
|
||||||
FilesPath: "/bundle",
|
FilePath: "/bundle",
|
||||||
},
|
},
|
||||||
Resources: config.Resources{
|
Resources: config.Resources{
|
||||||
Pipelines: map[string]*resources.Pipeline{
|
Pipelines: map[string]*resources.Pipeline{
|
||||||
"pipeline": {
|
"pipeline": {
|
||||||
Paths: resources.Paths{
|
Paths: paths.Paths{
|
||||||
ConfigFilePath: filepath.Join(dir, "resource.yml"),
|
ConfigFilePath: filepath.Join(dir, "resource.yml"),
|
||||||
},
|
},
|
||||||
PipelineSpec: &pipelines.PipelineSpec{
|
PipelineSpec: &pipelines.PipelineSpec{
|
||||||
|
@ -592,6 +667,6 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := mutator.TranslatePaths().Apply(context.Background(), bundle)
|
err := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
||||||
assert.ErrorContains(t, err, `expected a file for "libraries.file.path" but got a notebook`)
|
assert.ErrorContains(t, err, `expected a file for "libraries.file.path" but got a notebook`)
|
||||||
}
|
}
|
||||||
|
|
|
@@ -10,7 +10,7 @@ import (
 )
 
 func TestValidateGitDetailsMatchingBranches(t *testing.T) {
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Git: config.Git{
@@ -22,13 +22,13 @@ func TestValidateGitDetailsMatchingBranches(t *testing.T) {
 	}
 
 	m := ValidateGitDetails()
-	err := m.Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, m)
 
 	assert.NoError(t, err)
 }
 
 func TestValidateGitDetailsNonMatchingBranches(t *testing.T) {
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Git: config.Git{
@@ -40,14 +40,14 @@ func TestValidateGitDetailsNonMatchingBranches(t *testing.T) {
 	}
 
 	m := ValidateGitDetails()
-	err := m.Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, m)
 
 	expectedError := "not on the right Git branch:\n expected according to configuration: main\n actual: feature\nuse --force to override"
 	assert.EqualError(t, err, expectedError)
 }
 
 func TestValidateGitDetailsNotUsingGit(t *testing.T) {
-	bundle := &bundle.Bundle{
+	b := &bundle.Bundle{
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Git: config.Git{
@@ -59,7 +59,7 @@ func TestValidateGitDetailsNotUsingGit(t *testing.T) {
 	}
 
 	m := ValidateGitDetails()
-	err := m.Apply(context.Background(), bundle)
+	err := bundle.Apply(context.Background(), b, m)
 
 	assert.NoError(t, err)
 }
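
A pattern that repeats across the two test files above: the call site changes from m.Apply(context.Background(), bundle) to bundle.Apply(context.Background(), b, m), so mutators are no longer invoked directly but handed to a runner in the bundle package. The repository's actual runner is not part of this diff; the following is a minimal sketch of the shape these call sites imply, where the interface body and the variadic signature are assumptions, not the real implementation:

    package bundle

    import "context"

    // Bundle stands in for the real configuration container; only the
    // shape matters for this sketch.
    type Bundle struct{}

    // Mutator is the unit of work the tests above construct (for example,
    // mutator.TranslatePaths() or ValidateGitDetails()).
    type Mutator interface {
        Apply(ctx context.Context, b *Bundle) error
    }

    // Apply centralizes mutator execution: run each mutator in order and
    // stop at the first error, so logging and error handling can live in
    // one place instead of at every call site.
    func Apply(ctx context.Context, b *Bundle, ms ...Mutator) error {
        for _, m := range ms {
            if err := m.Apply(ctx, b); err != nil {
                return err
            }
        }
        return nil
    }
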
@@ -1,4 +1,4 @@
-package resources
+package paths
 
 import (
 	"fmt"
@@ -6,8 +6,8 @@ import (
 )
 
 type Paths struct {
-	// ConfigFilePath holds the path to the configuration file that
-	// described the resource that this type is embedded in.
+	// Absolute path on the local file system to the configuration file that holds
+	// the definition of this resource.
 	ConfigFilePath string `json:"-" bundle:"readonly"`
 }
@@ -13,6 +13,8 @@ type Resources struct {
 
 	Models      map[string]*resources.MlflowModel      `json:"models,omitempty"`
 	Experiments map[string]*resources.MlflowExperiment `json:"experiments,omitempty"`
+	ModelServingEndpoints map[string]*resources.ModelServingEndpoint `json:"model_serving_endpoints,omitempty"`
+	RegisteredModels map[string]*resources.RegisteredModel `json:"registered_models,omitempty"`
 }
 
 type UniqueResourceIdTracker struct {
@@ -93,6 +95,32 @@ func (r *Resources) VerifyUniqueResourceIdentifiers() (*UniqueResourceIdTracker,
 		tracker.Type[k] = "mlflow_experiment"
 		tracker.ConfigPath[k] = r.Experiments[k].ConfigFilePath
 	}
+	for k := range r.ModelServingEndpoints {
+		if _, ok := tracker.Type[k]; ok {
+			return tracker, fmt.Errorf("multiple resources named %s (%s at %s, %s at %s)",
+				k,
+				tracker.Type[k],
+				tracker.ConfigPath[k],
+				"model_serving_endpoint",
+				r.ModelServingEndpoints[k].ConfigFilePath,
+			)
+		}
+		tracker.Type[k] = "model_serving_endpoint"
+		tracker.ConfigPath[k] = r.ModelServingEndpoints[k].ConfigFilePath
+	}
+	for k := range r.RegisteredModels {
+		if _, ok := tracker.Type[k]; ok {
+			return tracker, fmt.Errorf("multiple resources named %s (%s at %s, %s at %s)",
+				k,
+				tracker.Type[k],
+				tracker.ConfigPath[k],
+				"registered_model",
+				r.RegisteredModels[k].ConfigFilePath,
+			)
+		}
+		tracker.Type[k] = "registered_model"
+		tracker.ConfigPath[k] = r.RegisteredModels[k].ConfigFilePath
+	}
 	return tracker, nil
 }
 
@@ -112,4 +140,31 @@ func (r *Resources) SetConfigFilePath(path string) {
 	for _, e := range r.Experiments {
 		e.ConfigFilePath = path
 	}
+	for _, e := range r.ModelServingEndpoints {
+		e.ConfigFilePath = path
+	}
+	for _, e := range r.RegisteredModels {
+		e.ConfigFilePath = path
+	}
+}
+
+// Merge iterates over all resources and merges chunks of the
+// resource configuration that can be merged. For example, for
+// jobs, this merges job cluster definitions and tasks that
+// use the same `job_cluster_key`, or `task_key`, respectively.
+func (r *Resources) Merge() error {
+	for _, job := range r.Jobs {
+		if err := job.MergeJobClusters(); err != nil {
+			return err
+		}
+		if err := job.MergeTasks(); err != nil {
+			return err
+		}
+	}
+	for _, pipeline := range r.Pipelines {
+		if err := pipeline.MergeClusters(); err != nil {
+			return err
+		}
+	}
+	return nil
 }
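
VerifyUniqueResourceIdentifiers above enforces a single namespace for resource keys: a job and a registered model may not share a name, because resources are addressed by bare key elsewhere in the configuration. A stripped-down, self-contained sketch of the same tracker pattern (the types and names here are illustrative, not the repository's):

    package main

    import "fmt"

    // verifyUnique records the type and config path of the first occurrence
    // of each name; a second occurrence of any name is an error, regardless
    // of resource type.
    func verifyUnique(byType map[string]map[string]string) error {
        seenType := make(map[string]string)
        seenPath := make(map[string]string)
        for typ, byName := range byType {
            for name, path := range byName {
                if prev, ok := seenType[name]; ok {
                    return fmt.Errorf("multiple resources named %s (%s at %s, %s at %s)",
                        name, prev, seenPath[name], typ, path)
                }
                seenType[name] = typ
                seenPath[name] = path
            }
        }
        return nil
    }

    func main() {
        // Map iteration order is random, so which occurrence is reported
        // "first" can vary between runs; the collision is always caught.
        err := verifyUnique(map[string]map[string]string{
            "job":              {"foo": "foo.yml"},
            "registered_model": {"foo": "bar.yml"},
        })
        fmt.Println(err)
    }
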
@@ -0,0 +1,9 @@
+package resources
+
+// Grant holds the grant level settings for a single principal in Unity Catalog.
+// Multiple of these can be defined on any Unity Catalog resource.
+type Grant struct {
+	Privileges []string `json:"privileges"`
+
+	Principal string `json:"principal"`
+}
@@ -1,12 +1,91 @@
 package resources
 
-import "github.com/databricks/databricks-sdk-go/service/jobs"
+import (
+	"github.com/databricks/cli/bundle/config/paths"
+	"github.com/databricks/databricks-sdk-go/marshal"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+	"github.com/imdario/mergo"
+)
 
 type Job struct {
 	ID          string       `json:"id,omitempty" bundle:"readonly"`
 	Permissions []Permission `json:"permissions,omitempty"`
 
-	Paths
+	paths.Paths
 
 	*jobs.JobSettings
 }
+
+func (s *Job) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s Job) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// MergeJobClusters merges job clusters with the same key.
+// The job clusters field is a slice, and as such, overrides are appended to it.
+// We can identify a job cluster by its key, however, so we can use this key
+// to figure out which definitions are actually overrides and merge them.
+func (j *Job) MergeJobClusters() error {
+	keys := make(map[string]*jobs.JobCluster)
+	output := make([]jobs.JobCluster, 0, len(j.JobClusters))
+
+	// Target overrides are always appended, so we can iterate in natural order to
+	// first find the base definition, and merge instances we encounter later.
+	for i := range j.JobClusters {
+		key := j.JobClusters[i].JobClusterKey
+
+		// Register job cluster with key if not yet seen before.
+		ref, ok := keys[key]
+		if !ok {
+			output = append(output, j.JobClusters[i])
+			keys[key] = &output[len(output)-1]
+			continue
+		}
+
+		// Merge this instance into the reference.
+		err := mergo.Merge(ref, &j.JobClusters[i], mergo.WithOverride, mergo.WithAppendSlice)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Overwrite resulting slice.
+	j.JobClusters = output
+	return nil
+}
+
+// MergeTasks merges tasks with the same key.
+// The tasks field is a slice, and as such, overrides are appended to it.
+// We can identify a task by its task key, however, so we can use this key
+// to figure out which definitions are actually overrides and merge them.
+func (j *Job) MergeTasks() error {
+	keys := make(map[string]*jobs.Task)
+	tasks := make([]jobs.Task, 0, len(j.Tasks))
+
+	// Target overrides are always appended, so we can iterate in natural order to
+	// first find the base definition, and merge instances we encounter later.
+	for i := range j.Tasks {
+		key := j.Tasks[i].TaskKey
+
+		// Register the task with key if not yet seen before.
+		ref, ok := keys[key]
+		if !ok {
+			tasks = append(tasks, j.Tasks[i])
+			keys[key] = &tasks[len(tasks)-1]
+			continue
+		}
+
+		// Merge this instance into the reference.
+		err := mergo.Merge(ref, &j.Tasks[i], mergo.WithOverride, mergo.WithAppendSlice)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Overwrite resulting slice.
+	j.Tasks = tasks
+	return nil
+}
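
One subtlety in MergeJobClusters and MergeTasks above: the keys map stores pointers into the output slice (&output[len(output)-1]), and the later mergo.Merge calls mutate those elements through the pointers. This is only safe because the slice is preallocated with capacity len(j.JobClusters), so append never reallocates the backing array and the stored pointers never dangle. A self-contained sketch of the idiom, with a simplified struct and a plain field override standing in for mergo:

    package main

    import "fmt"

    type cluster struct {
        Key     string
        Workers int
    }

    func main() {
        in := []cluster{{"foo", 2}, {"bar", 1}, {"foo", 4}}

        // Preallocate with cap(len(in)) so append below never reallocates;
        // otherwise the pointers kept in seen would point into a stale array.
        out := make([]cluster, 0, len(in))
        seen := make(map[string]*cluster)

        for i := range in {
            if ref, ok := seen[in[i].Key]; ok {
                // Later occurrences override the base definition in place.
                ref.Workers = in[i].Workers
                continue
            }
            out = append(out, in[i])
            seen[in[i].Key] = &out[len(out)-1]
        }

        fmt.Println(out) // [{foo 4} {bar 1}]
    }
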
@@ -0,0 +1,116 @@
+package resources
+
+import (
+	"testing"
+
+	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestJobMergeJobClusters(t *testing.T) {
+	j := &Job{
+		JobSettings: &jobs.JobSettings{
+			JobClusters: []jobs.JobCluster{
+				{
+					JobClusterKey: "foo",
+					NewCluster: &compute.ClusterSpec{
+						SparkVersion: "13.3.x-scala2.12",
+						NodeTypeId:   "i3.xlarge",
+						NumWorkers:   2,
+					},
+				},
+				{
+					JobClusterKey: "bar",
+					NewCluster: &compute.ClusterSpec{
+						SparkVersion: "10.4.x-scala2.12",
+					},
+				},
+				{
+					JobClusterKey: "foo",
+					NewCluster: &compute.ClusterSpec{
+						NodeTypeId: "i3.2xlarge",
+						NumWorkers: 4,
+					},
+				},
+			},
+		},
+	}
+
+	err := j.MergeJobClusters()
+	require.NoError(t, err)
+
+	assert.Len(t, j.JobClusters, 2)
+	assert.Equal(t, "foo", j.JobClusters[0].JobClusterKey)
+	assert.Equal(t, "bar", j.JobClusters[1].JobClusterKey)
+
+	// This job cluster was merged with a subsequent one.
+	jc0 := j.JobClusters[0].NewCluster
+	assert.Equal(t, "13.3.x-scala2.12", jc0.SparkVersion)
+	assert.Equal(t, "i3.2xlarge", jc0.NodeTypeId)
+	assert.Equal(t, 4, jc0.NumWorkers)
+
+	// This job cluster was left untouched.
+	jc1 := j.JobClusters[1].NewCluster
+	assert.Equal(t, "10.4.x-scala2.12", jc1.SparkVersion)
+}
+
+func TestJobMergeTasks(t *testing.T) {
+	j := &Job{
+		JobSettings: &jobs.JobSettings{
+			Tasks: []jobs.Task{
+				{
+					TaskKey: "foo",
+					NewCluster: &compute.ClusterSpec{
+						SparkVersion: "13.3.x-scala2.12",
+						NodeTypeId:   "i3.xlarge",
+						NumWorkers:   2,
+					},
+					Libraries: []compute.Library{
+						{Whl: "package1"},
+					},
+				},
+				{
+					TaskKey: "bar",
+					NewCluster: &compute.ClusterSpec{
+						SparkVersion: "10.4.x-scala2.12",
+					},
+				},
+				{
+					TaskKey: "foo",
+					NewCluster: &compute.ClusterSpec{
+						NodeTypeId: "i3.2xlarge",
+						NumWorkers: 4,
+					},
+					Libraries: []compute.Library{
+						{Pypi: &compute.PythonPyPiLibrary{
+							Package: "package2",
+						}},
+					},
+				},
+			},
+		},
+	}
+
+	err := j.MergeTasks()
+	require.NoError(t, err)
+
+	assert.Len(t, j.Tasks, 2)
+	assert.Equal(t, "foo", j.Tasks[0].TaskKey)
+	assert.Equal(t, "bar", j.Tasks[1].TaskKey)
+
+	// This task was merged with a subsequent one.
+	task0 := j.Tasks[0]
+	cluster := task0.NewCluster
+	assert.Equal(t, "13.3.x-scala2.12", cluster.SparkVersion)
+	assert.Equal(t, "i3.2xlarge", cluster.NodeTypeId)
+	assert.Equal(t, 4, cluster.NumWorkers)
+	assert.Len(t, task0.Libraries, 2)
+	assert.Equal(t, task0.Libraries[0].Whl, "package1")
+	assert.Equal(t, task0.Libraries[1].Pypi.Package, "package2")
+
+	// This task was left untouched.
+	task1 := j.Tasks[1].NewCluster
+	assert.Equal(t, "10.4.x-scala2.12", task1.SparkVersion)
+}
@@ -1,11 +1,23 @@
 package resources
 
-import "github.com/databricks/databricks-sdk-go/service/ml"
+import (
+	"github.com/databricks/cli/bundle/config/paths"
+	"github.com/databricks/databricks-sdk-go/marshal"
+	"github.com/databricks/databricks-sdk-go/service/ml"
+)
 
 type MlflowExperiment struct {
 	Permissions []Permission `json:"permissions,omitempty"`
 
-	Paths
+	paths.Paths
 
 	*ml.Experiment
 }
+
+func (s *MlflowExperiment) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s MlflowExperiment) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
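
Job, MlflowExperiment, and (below) MlflowModel all gain UnmarshalJSON/MarshalJSON methods that delegate to the SDK's marshal package. A plausible reason, stated here as an assumption rather than something this diff spells out: when an embedded SDK type carries its own JSON behavior, the methods promoted from the embedded type would otherwise drive serialization of the whole wrapper and skip the wrapper's extra fields. A sketch of the pattern on a hypothetical wrapper type:

    package resources

    import (
        "github.com/databricks/databricks-sdk-go/marshal"
        "github.com/databricks/databricks-sdk-go/service/ml"
    )

    // modelWrapper is hypothetical, for illustration only: one local field
    // plus an embedded SDK type, mirroring the structs in this diff.
    type modelWrapper struct {
        Note string `json:"note,omitempty"`

        *ml.Model
    }

    // Defining both methods on the wrapper itself (delegating to the same
    // marshal helpers the diff uses) keeps the wrapper, not the embedded
    // type, in control of serialization, so Note is not silently dropped.
    func (s *modelWrapper) UnmarshalJSON(b []byte) error {
        return marshal.Unmarshal(b, s)
    }

    func (s modelWrapper) MarshalJSON() ([]byte, error) {
        return marshal.Marshal(s)
    }
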
@@ -1,11 +1,23 @@
 package resources
 
-import "github.com/databricks/databricks-sdk-go/service/ml"
+import (
+	"github.com/databricks/cli/bundle/config/paths"
+	"github.com/databricks/databricks-sdk-go/marshal"
+	"github.com/databricks/databricks-sdk-go/service/ml"
+)
 
 type MlflowModel struct {
 	Permissions []Permission `json:"permissions,omitempty"`
 
-	Paths
+	paths.Paths
 
 	*ml.Model
 }
+
+func (s *MlflowModel) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s MlflowModel) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
@@ -0,0 +1,33 @@
+package resources
+
+import (
+	"github.com/databricks/cli/bundle/config/paths"
+	"github.com/databricks/databricks-sdk-go/marshal"
+	"github.com/databricks/databricks-sdk-go/service/serving"
+)
+
+type ModelServingEndpoint struct {
+	// This represents the input args for terraform, and will get converted
+	// to a HCL representation for CRUD
+	*serving.CreateServingEndpoint
+
+	// This represents the id (ie serving_endpoint_id) that can be used
+	// as a reference in other resources. This value is returned by terraform.
+	ID string
+
+	// Path to config file where the resource is defined. All bundle resources
+	// include this for interpolation purposes.
+	paths.Paths
+
+	// This is a resource agnostic implementation of permissions for ACLs.
+	// Implementation could be different based on the resource type.
+	Permissions []Permission `json:"permissions,omitempty"`
+}
+
+func (s *ModelServingEndpoint) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ModelServingEndpoint) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
@@ -1,12 +1,74 @@
 package resources
 
-import "github.com/databricks/databricks-sdk-go/service/pipelines"
+import (
+	"strings"
+
+	"github.com/databricks/cli/bundle/config/paths"
+	"github.com/databricks/databricks-sdk-go/marshal"
+	"github.com/databricks/databricks-sdk-go/service/pipelines"
+	"github.com/imdario/mergo"
+)
 
 type Pipeline struct {
 	ID          string       `json:"id,omitempty" bundle:"readonly"`
 	Permissions []Permission `json:"permissions,omitempty"`
 
-	Paths
+	paths.Paths
 
 	*pipelines.PipelineSpec
 }
+
+func (s *Pipeline) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s Pipeline) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// MergeClusters merges cluster definitions with same label.
+// The clusters field is a slice, and as such, overrides are appended to it.
+// We can identify a cluster by its label, however, so we can use this label
+// to figure out which definitions are actually overrides and merge them.
+//
+// Note: the cluster label is optional and defaults to 'default'.
+// We therefore ALSO merge all clusters without a label.
+func (p *Pipeline) MergeClusters() error {
+	clusters := make(map[string]*pipelines.PipelineCluster)
+	output := make([]pipelines.PipelineCluster, 0, len(p.Clusters))
+
+	// Normalize cluster labels.
+	// If empty, this defaults to "default".
+	// To make matching case insensitive, labels are lowercased.
+	for i := range p.Clusters {
+		label := p.Clusters[i].Label
+		if label == "" {
+			label = "default"
+		}
+		p.Clusters[i].Label = strings.ToLower(label)
+	}
+
+	// Target overrides are always appended, so we can iterate in natural order to
+	// first find the base definition, and merge instances we encounter later.
+	for i := range p.Clusters {
+		label := p.Clusters[i].Label
+
+		// Register pipeline cluster with label if not yet seen before.
+		ref, ok := clusters[label]
+		if !ok {
+			output = append(output, p.Clusters[i])
+			clusters[label] = &output[len(output)-1]
+			continue
+		}
+
+		// Merge this instance into the reference.
+		err := mergo.Merge(ref, &p.Clusters[i], mergo.WithOverride, mergo.WithAppendSlice)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Overwrite resulting slice.
+	p.Clusters = output
+	return nil
+}
@@ -0,0 +1,76 @@
+package resources
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/databricks/databricks-sdk-go/service/pipelines"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestPipelineMergeClusters(t *testing.T) {
+	p := &Pipeline{
+		PipelineSpec: &pipelines.PipelineSpec{
+			Clusters: []pipelines.PipelineCluster{
+				{
+					NodeTypeId: "i3.xlarge",
+					NumWorkers: 2,
+					PolicyId:   "1234",
+				},
+				{
+					Label:      "maintenance",
+					NodeTypeId: "i3.2xlarge",
+				},
+				{
+					NodeTypeId: "i3.2xlarge",
+					NumWorkers: 4,
+				},
+			},
+		},
+	}
+
+	err := p.MergeClusters()
+	require.NoError(t, err)
+
+	assert.Len(t, p.Clusters, 2)
+	assert.Equal(t, "default", p.Clusters[0].Label)
+	assert.Equal(t, "maintenance", p.Clusters[1].Label)
+
+	// The default cluster was merged with a subsequent one.
+	pc0 := p.Clusters[0]
+	assert.Equal(t, "i3.2xlarge", pc0.NodeTypeId)
+	assert.Equal(t, 4, pc0.NumWorkers)
+	assert.Equal(t, "1234", pc0.PolicyId)
+
+	// The maintenance cluster was left untouched.
+	pc1 := p.Clusters[1]
+	assert.Equal(t, "i3.2xlarge", pc1.NodeTypeId)
+}
+
+func TestPipelineMergeClustersCaseInsensitive(t *testing.T) {
+	p := &Pipeline{
+		PipelineSpec: &pipelines.PipelineSpec{
+			Clusters: []pipelines.PipelineCluster{
+				{
+					Label:      "default",
+					NumWorkers: 2,
+				},
+				{
+					Label:      "DEFAULT",
+					NumWorkers: 4,
+				},
+			},
+		},
+	}
+
+	err := p.MergeClusters()
+	require.NoError(t, err)
+
+	assert.Len(t, p.Clusters, 1)
+
+	// The default cluster was merged with a subsequent one.
+	pc0 := p.Clusters[0]
+	assert.Equal(t, "default", strings.ToLower(pc0.Label))
+	assert.Equal(t, 4, pc0.NumWorkers)
+}
@@ -0,0 +1,34 @@
+package resources
+
+import (
+	"github.com/databricks/cli/bundle/config/paths"
+	"github.com/databricks/databricks-sdk-go/marshal"
+	"github.com/databricks/databricks-sdk-go/service/catalog"
+)
+
+type RegisteredModel struct {
+	// This is a resource agnostic implementation of grants.
+	// Implementation could be different based on the resource type.
+	Grants []Grant `json:"grants,omitempty"`
+
+	// This represents the id which is the full name of the model
+	// (catalog_name.schema_name.model_name) that can be used
+	// as a reference in other resources. This value is returned by terraform.
+	ID string
+
+	// Path to config file where the resource is defined. All bundle resources
+	// include this for interpolation purposes.
+	paths.Paths
+
+	// This represents the input args for terraform, and will get converted
+	// to a HCL representation for CRUD
+	*catalog.CreateRegisteredModelRequest
+}
+
+func (s *RegisteredModel) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s RegisteredModel) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
@@ -3,6 +3,7 @@ package config
 import (
 	"testing"
 
+	"github.com/databricks/cli/bundle/config/paths"
 	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/stretchr/testify/assert"
 )
@@ -11,21 +12,21 @@ func TestVerifyUniqueResourceIdentifiers(t *testing.T) {
 	r := Resources{
 		Jobs: map[string]*resources.Job{
 			"foo": {
-				Paths: resources.Paths{
+				Paths: paths.Paths{
 					ConfigFilePath: "foo.yml",
 				},
 			},
 		},
 		Models: map[string]*resources.MlflowModel{
 			"bar": {
-				Paths: resources.Paths{
+				Paths: paths.Paths{
 					ConfigFilePath: "bar.yml",
 				},
 			},
 		},
 		Experiments: map[string]*resources.MlflowExperiment{
 			"foo": {
-				Paths: resources.Paths{
+				Paths: paths.Paths{
 					ConfigFilePath: "foo2.yml",
 				},
 			},
@@ -39,14 +40,14 @@ func TestVerifySafeMerge(t *testing.T) {
 	r := Resources{
 		Jobs: map[string]*resources.Job{
 			"foo": {
-				Paths: resources.Paths{
+				Paths: paths.Paths{
 					ConfigFilePath: "foo.yml",
 				},
 			},
 		},
 		Models: map[string]*resources.MlflowModel{
 			"bar": {
-				Paths: resources.Paths{
+				Paths: paths.Paths{
 					ConfigFilePath: "bar.yml",
 				},
 			},
@@ -55,7 +56,7 @@ func TestVerifySafeMerge(t *testing.T) {
 	other := Resources{
 		Pipelines: map[string]*resources.Pipeline{
 			"foo": {
-				Paths: resources.Paths{
+				Paths: paths.Paths{
 					ConfigFilePath: "foo2.yml",
 				},
 			},
@@ -69,14 +70,14 @@ func TestVerifySafeMergeForSameResourceType(t *testing.T) {
 	r := Resources{
 		Jobs: map[string]*resources.Job{
 			"foo": {
-				Paths: resources.Paths{
+				Paths: paths.Paths{
 					ConfigFilePath: "foo.yml",
 				},
 			},
 		},
 		Models: map[string]*resources.MlflowModel{
 			"bar": {
-				Paths: resources.Paths{
+				Paths: paths.Paths{
 					ConfigFilePath: "bar.yml",
 				},
 			},
@@ -85,7 +86,7 @@ func TestVerifySafeMergeForSameResourceType(t *testing.T) {
 	other := Resources{
 		Jobs: map[string]*resources.Job{
 			"foo": {
-				Paths: resources.Paths{
+				Paths: paths.Paths{
 					ConfigFilePath: "foo2.yml",
 				},
 			},
@@ -94,3 +95,33 @@ func TestVerifySafeMergeForSameResourceType(t *testing.T) {
 	err := r.VerifySafeMerge(&other)
 	assert.ErrorContains(t, err, "multiple resources named foo (job at foo.yml, job at foo2.yml)")
 }
+
+func TestVerifySafeMergeForRegisteredModels(t *testing.T) {
+	r := Resources{
+		Jobs: map[string]*resources.Job{
+			"foo": {
+				Paths: paths.Paths{
+					ConfigFilePath: "foo.yml",
+				},
+			},
+		},
+		RegisteredModels: map[string]*resources.RegisteredModel{
+			"bar": {
+				Paths: paths.Paths{
+					ConfigFilePath: "bar.yml",
+				},
+			},
+		},
+	}
+	other := Resources{
+		RegisteredModels: map[string]*resources.RegisteredModel{
+			"bar": {
+				Paths: paths.Paths{
+					ConfigFilePath: "bar2.yml",
+				},
+			},
+		},
+	}
+	err := r.VerifySafeMerge(&other)
+	assert.ErrorContains(t, err, "multiple resources named bar (registered_model at bar.yml, registered_model at bar2.yml)")
+}
@@ -6,41 +6,13 @@ import (
 	"path/filepath"
 	"strings"
 
+	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/cli/bundle/config/variable"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/ghodss/yaml"
 	"github.com/imdario/mergo"
 )
 
-type ConfigFileNames []string
-
-// FileNames contains allowed names of bundle configuration files.
-var FileNames = ConfigFileNames{"databricks.yml", "databricks.yaml", "bundle.yml", "bundle.yaml"}
-
-func (c ConfigFileNames) FindInPath(path string) (string, error) {
-	result := ""
-	var firstErr error
-
-	for _, file := range c {
-		filePath := filepath.Join(path, file)
-		_, err := os.Stat(filePath)
-		if err == nil {
-			if result != "" {
-				return "", fmt.Errorf("multiple bundle root configuration files found in %s", path)
-			}
-			result = filePath
-		} else {
-			if firstErr == nil {
-				firstErr = err
-			}
-		}
-	}
-
-	if result == "" {
-		return "", firstErr
-	}
-	return result, nil
-}
-
 type Root struct {
 	// Path contains the directory path to the root of the bundle.
 	// It is set when loading `databricks.yml`.
@@ -51,7 +23,7 @@ type Root struct {
 
 	// Bundle contains details about this bundle, such as its name,
 	// version of the spec (TODO), default cluster, default warehouse, etc.
-	Bundle Bundle `json:"bundle"`
+	Bundle Bundle `json:"bundle,omitempty"`
 
 	// Include specifies a list of patterns of file names to load and
 	// merge into the this configuration. Only includes defined in the root
@@ -63,54 +35,82 @@ type Root struct {
 	Workspace Workspace `json:"workspace,omitempty"`
 
 	// Artifacts contains a description of all code artifacts in this bundle.
-	Artifacts map[string]*Artifact `json:"artifacts,omitempty"`
+	Artifacts Artifacts `json:"artifacts,omitempty"`
 
 	// Resources contains a description of all Databricks resources
 	// to deploy in this bundle (e.g. jobs, pipelines, etc.).
 	Resources Resources `json:"resources,omitempty"`
 
-	// Environments can be used to differentiate settings and resources between
-	// bundle deployment environments (e.g. development, staging, production).
+	// Targets can be used to differentiate settings and resources between
+	// bundle deployment targets (e.g. development, staging, production).
 	// If not specified, the code below initializes this field with a
-	// single default-initialized environment called "default".
-	Environments map[string]*Environment `json:"environments,omitempty"`
+	// single default-initialized target called "default".
+	Targets map[string]*Target `json:"targets,omitempty"`
+
+	// DEPRECATED. Left for backward compatibility with Targets
+	Environments map[string]*Target `json:"environments,omitempty" bundle:"deprecated"`
+
+	// Sync section specifies options for files synchronization
+	Sync Sync `json:"sync,omitempty"`
+
+	// RunAs section allows to define an execution identity for jobs and pipelines runs
+	RunAs *jobs.JobRunAs `json:"run_as,omitempty"`
+
+	Experimental *Experimental `json:"experimental,omitempty"`
+
+	// Permissions section allows to define permissions which will be
+	// applied to all resources defined in bundle
+	Permissions []resources.Permission `json:"permissions,omitempty"`
 }
 
+// Load loads the bundle configuration file at the specified path.
 func Load(path string) (*Root, error) {
+	raw, err := os.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+
 	var r Root
-	stat, err := os.Stat(path)
+	err = yaml.Unmarshal(raw, &r)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to load %s: %w", path, err)
 	}
 
-	// If we were given a directory, assume this is the bundle root.
-	if stat.IsDir() {
-		path, err = FileNames.FindInPath(path)
-		if err != nil {
-			return nil, err
-		}
+	if r.Environments != nil && r.Targets != nil {
+		return nil, fmt.Errorf("both 'environments' and 'targets' are specified, only 'targets' should be used: %s", path)
 	}
 
-	if err := r.Load(path); err != nil {
-		return nil, err
+	if r.Environments != nil {
+		//TODO: add a command line notice that this is a deprecated option.
+		r.Targets = r.Environments
 	}
 
-	return &r, nil
+	r.Path = filepath.Dir(path)
+	r.SetConfigFilePath(path)
+
+	_, err = r.Resources.VerifyUniqueResourceIdentifiers()
+	return &r, err
 }
 
 // SetConfigFilePath configures the path that its configuration
 // was loaded from in configuration leafs that require it.
 func (r *Root) SetConfigFilePath(path string) {
 	r.Resources.SetConfigFilePath(path)
-	if r.Environments != nil {
-		for _, env := range r.Environments {
+	if r.Artifacts != nil {
+		r.Artifacts.SetConfigFilePath(path)
+	}
+
+	if r.Targets != nil {
+		for _, env := range r.Targets {
 			if env == nil {
 				continue
 			}
 			if env.Resources != nil {
 				env.Resources.SetConfigFilePath(path)
 			}
+			if env.Artifacts != nil {
+				env.Artifacts.SetConfigFilePath(path)
+			}
 		}
 	}
 }
@@ -138,75 +138,69 @@ func (r *Root) InitializeVariables(vars []string) error {
 	return nil
 }
 
-func (r *Root) Load(path string) error {
-	raw, err := os.ReadFile(path)
-	if err != nil {
-		return err
-	}
-	err = yaml.Unmarshal(raw, r)
-	if err != nil {
-		return fmt.Errorf("failed to load %s: %w", path, err)
-	}
-
-	r.Path = filepath.Dir(path)
-	r.SetConfigFilePath(path)
-
-	_, err = r.Resources.VerifyUniqueResourceIdentifiers()
-	return err
-}
-
 func (r *Root) Merge(other *Root) error {
+	err := r.Sync.Merge(r, other)
+	if err != nil {
+		return err
+	}
+	other.Sync = Sync{}
+
 	// TODO: when hooking into merge semantics, disallow setting path on the target instance.
 	other.Path = ""
 
 	// Check for safe merge, protecting against duplicate resource identifiers
-	err := r.Resources.VerifySafeMerge(&other.Resources)
+	err = r.Resources.VerifySafeMerge(&other.Resources)
 	if err != nil {
 		return err
 	}
 
 	// TODO: define and test semantics for merging.
-	return mergo.MergeWithOverwrite(r, other)
+	return mergo.Merge(r, other, mergo.WithOverride)
 }
 
-func (r *Root) MergeEnvironment(env *Environment) error {
+func (r *Root) MergeTargetOverrides(target *Target) error {
 	var err error
 
-	// Environment may be nil if it's empty.
-	if env == nil {
+	// Target may be nil if it's empty.
+	if target == nil {
 		return nil
 	}
 
-	if env.Bundle != nil {
-		err = mergo.MergeWithOverwrite(&r.Bundle, env.Bundle)
+	if target.Bundle != nil {
+		err = mergo.Merge(&r.Bundle, target.Bundle, mergo.WithOverride)
 		if err != nil {
 			return err
 		}
 	}
 
-	if env.Workspace != nil {
-		err = mergo.MergeWithOverwrite(&r.Workspace, env.Workspace)
+	if target.Workspace != nil {
+		err = mergo.Merge(&r.Workspace, target.Workspace, mergo.WithOverride)
 		if err != nil {
 			return err
 		}
 	}
 
-	if env.Artifacts != nil {
-		err = mergo.Merge(&r.Artifacts, env.Artifacts, mergo.WithAppendSlice)
+	if target.Artifacts != nil {
+		err = mergo.Merge(&r.Artifacts, target.Artifacts, mergo.WithOverride, mergo.WithAppendSlice)
 		if err != nil {
 			return err
 		}
 	}
 
-	if env.Resources != nil {
-		err = mergo.Merge(&r.Resources, env.Resources, mergo.WithAppendSlice)
+	if target.Resources != nil {
+		err = mergo.Merge(&r.Resources, target.Resources, mergo.WithOverride, mergo.WithAppendSlice)
+		if err != nil {
+			return err
+		}
+
+		err = r.Resources.Merge()
 		if err != nil {
 			return err
 		}
 	}
 
-	if env.Variables != nil {
-		for k, v := range env.Variables {
+	if target.Variables != nil {
+		for k, v := range target.Variables {
 			variable, ok := r.Variables[k]
 			if !ok {
 				return fmt.Errorf("variable %s is not defined but is assigned a value", k)
@@ -217,24 +211,42 @@ func (r *Root) MergeEnvironment(env *Environment) error {
 		}
 	}
 
-	if env.Mode != "" {
-		r.Bundle.Mode = env.Mode
+	if target.RunAs != nil {
+		r.RunAs = target.RunAs
 	}
 
-	if env.ComputeID != "" {
-		r.Bundle.ComputeID = env.ComputeID
+	if target.Mode != "" {
+		r.Bundle.Mode = target.Mode
+	}
+
+	if target.ComputeID != "" {
+		r.Bundle.ComputeID = target.ComputeID
 	}
 
 	git := &r.Bundle.Git
-	if env.Git.Branch != "" {
-		git.Branch = env.Git.Branch
+	if target.Git.Branch != "" {
+		git.Branch = target.Git.Branch
 		git.Inferred = false
 	}
-	if env.Git.Commit != "" {
-		git.Commit = env.Git.Commit
+	if target.Git.Commit != "" {
+		git.Commit = target.Git.Commit
+	}
+	if target.Git.OriginURL != "" {
+		git.OriginURL = target.Git.OriginURL
+	}
+
+	if target.Sync != nil {
+		err = mergo.Merge(&r.Sync, target.Sync, mergo.WithAppendSlice)
+		if err != nil {
+			return err
+		}
+	}
+
+	if target.Permissions != nil {
+		err = mergo.Merge(&r.Permissions, target.Permissions, mergo.WithAppendSlice)
+		if err != nil {
+			return err
+		}
 	}
-	if env.Git.OriginURL != "" {
-		git.OriginURL = env.Git.OriginURL
-	}
 
 	return nil
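
Throughout root.go above, mergo.MergeWithOverwrite(dst, src) becomes mergo.Merge(dst, src, mergo.WithOverride), and the artifact/resource merges add mergo.WithOverride next to mergo.WithAppendSlice. A small self-contained sketch of what those two options do, using illustrative types and values:

    package main

    import (
        "fmt"

        "github.com/imdario/mergo"
    )

    type conf struct {
        Host  string
        Tags  []string
        Ports []int
    }

    func main() {
        dst := conf{Host: "base", Tags: []string{"a"}, Ports: []int{80}}
        src := conf{Host: "override", Tags: []string{"b"}}

        // WithOverride: non-empty fields in src replace the ones in dst.
        // WithAppendSlice: slices are concatenated instead of replaced.
        if err := mergo.Merge(&dst, src, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
            panic(err)
        }

        fmt.Println(dst.Host)  // override
        fmt.Println(dst.Tags)  // [a b]
        fmt.Println(dst.Ports) // [80] (src.Ports is empty, so dst keeps its value)
    }
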
@@ -2,11 +2,7 @@ package config
 
 import (
 	"encoding/json"
-	"os"
-	"path/filepath"
 	"reflect"
-	"runtime"
-	"strings"
 	"testing"
 
 	"github.com/databricks/cli/bundle/config/variable"
@@ -29,8 +25,7 @@ func TestRootMarshalUnmarshal(t *testing.T) {
 }
 
 func TestRootLoad(t *testing.T) {
-	root := &Root{}
-	err := root.Load("../tests/basic/databricks.yml")
+	root, err := Load("../tests/basic/databricks.yml")
 	require.NoError(t, err)
 	assert.Equal(t, "basic", root.Bundle.Name)
 }
@@ -57,7 +52,7 @@ func TestRootMergeStruct(t *testing.T) {
 func TestRootMergeMap(t *testing.T) {
 	root := &Root{
 		Path: "path",
-		Environments: map[string]*Environment{
+		Targets: map[string]*Target{
 			"development": {
 				Workspace: &Workspace{
 					Host: "foo",
@@ -68,7 +63,7 @@ func TestRootMergeMap(t *testing.T) {
 	}
 	other := &Root{
 		Path: "path",
-		Environments: map[string]*Environment{
+		Targets: map[string]*Target{
 			"development": {
 				Workspace: &Workspace{
 					Host: "bar",
@@ -77,22 +72,19 @@ func TestRootMergeMap(t *testing.T) {
 		},
 	}
 	assert.NoError(t, root.Merge(other))
-	assert.Equal(t, &Workspace{Host: "bar", Profile: "profile"}, root.Environments["development"].Workspace)
+	assert.Equal(t, &Workspace{Host: "bar", Profile: "profile"}, root.Targets["development"].Workspace)
 }
 
 func TestDuplicateIdOnLoadReturnsError(t *testing.T) {
-	root := &Root{}
-	err := root.Load("./testdata/duplicate_resource_names_in_root/databricks.yml")
+	_, err := Load("./testdata/duplicate_resource_names_in_root/databricks.yml")
 	assert.ErrorContains(t, err, "multiple resources named foo (job at ./testdata/duplicate_resource_names_in_root/databricks.yml, pipeline at ./testdata/duplicate_resource_names_in_root/databricks.yml)")
 }
 
 func TestDuplicateIdOnMergeReturnsError(t *testing.T) {
-	root := &Root{}
-	err := root.Load("./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml")
+	root, err := Load("./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml")
 	require.NoError(t, err)
 
-	other := &Root{}
-	err = other.Load("./testdata/duplicate_resource_name_in_subconfiguration/resources.yml")
+	other, err := Load("./testdata/duplicate_resource_name_in_subconfiguration/resources.yml")
 	require.NoError(t, err)
 
 	err = root.Merge(other)
@@ -159,70 +151,11 @@ func TestInitializeVariablesUndefinedVariables(t *testing.T) {
 	assert.ErrorContains(t, err, "variable bar has not been defined")
 }
 
-func TestRootMergeEnvironmentWithMode(t *testing.T) {
+func TestRootMergeTargetOverridesWithMode(t *testing.T) {
 	root := &Root{
 		Bundle: Bundle{},
 	}
-	env := &Environment{Mode: Development}
-	require.NoError(t, root.MergeEnvironment(env))
+	env := &Target{Mode: Development}
+	require.NoError(t, root.MergeTargetOverrides(env))
 	assert.Equal(t, Development, root.Bundle.Mode)
 }
-
-func TestConfigFileNames_FindInPath(t *testing.T) {
-	testCases := []struct {
-		name     string
-		files    []string
-		expected string
-		err      string
-	}{
-		{
-			name:     "file found",
-			files:    []string{"databricks.yml"},
-			expected: "BASE/databricks.yml",
-			err:      "",
-		},
-		{
-			name:     "file found",
-			files:    []string{"bundle.yml"},
-			expected: "BASE/bundle.yml",
-			err:      "",
-		},
-		{
-			name:     "multiple files found",
-			files:    []string{"databricks.yaml", "bundle.yml"},
-			expected: "",
-			err:      "multiple bundle root configuration files found",
-		},
-		{
-			name:     "file not found",
-			files:    []string{},
-			expected: "",
-			err:      "no such file or directory",
-		},
-	}
-
-	if runtime.GOOS == "windows" {
-		testCases[3].err = "The system cannot find the file specified."
-	}
-
-	for _, tc := range testCases {
-		t.Run(tc.name, func(t *testing.T) {
-			projectDir := t.TempDir()
-			for _, file := range tc.files {
-				f1, _ := os.Create(filepath.Join(projectDir, file))
-				f1.Close()
-			}
-
-			result, err := FileNames.FindInPath(projectDir)
-
-			expected := strings.Replace(tc.expected, "BASE/", projectDir+string(os.PathSeparator), 1)
-			assert.Equal(t, expected, result)
-
-			if tc.err != "" {
-				assert.ErrorContains(t, err, tc.err)
-			} else {
-				assert.NoError(t, err)
-			}
-		})
-	}
-}
@@ -0,0 +1,31 @@
+package config
+
+import "path/filepath"
+
+type Sync struct {
+	// Include contains a list of globs evaluated relative to the bundle root path
+	// to explicitly include files that were excluded by the user's gitignore.
+	Include []string `json:"include,omitempty"`
+
+	// Exclude contains a list of globs evaluated relative to the bundle root path
+	// to explicitly exclude files that were included by
+	// 1) the default that observes the user's gitignore, or
+	// 2) the `Include` field above.
+	Exclude []string `json:"exclude,omitempty"`
+}
+
+func (s *Sync) Merge(root *Root, other *Root) error {
+	path, err := filepath.Rel(root.Path, other.Path)
+	if err != nil {
+		return err
+	}
+	for _, include := range other.Sync.Include {
+		s.Include = append(s.Include, filepath.Join(path, include))
+	}
+
+	for _, exclude := range other.Sync.Exclude {
+		s.Exclude = append(s.Exclude, filepath.Join(path, exclude))
+	}
+
+	return nil
+}
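
Sync.Merge above rebases an included file's sync globs onto the bundle root: filepath.Rel yields the subdirectory prefix between the two config locations, and filepath.Join prepends it to each glob. A self-contained sketch with concrete, illustrative paths:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        rootPath := "/repo"          // bundle root (illustrative)
        otherPath := "/repo/sub/dir" // directory of an included config file

        // Same two steps as Sync.Merge: find the relative prefix, then
        // rebase each glob so it stays valid relative to the bundle root.
        // Note filepath uses the host separator, so output differs on Windows.
        prefix, err := filepath.Rel(rootPath, otherPath)
        if err != nil {
            panic(err)
        }

        include := []string{"*.whl", "build/**"}
        rebased := make([]string, 0, len(include))
        for _, glob := range include {
            rebased = append(rebased, filepath.Join(prefix, glob))
        }

        fmt.Println(rebased) // [sub/dir/*.whl sub/dir/build/**]
    }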