mirror of https://github.com/databricks/cli.git
Merge remote-tracking branch 'origin' into init-all
commit aabb574749
@ -18,6 +18,13 @@ var accountCmd = &cobra.Command{
func init() {
    root.RootCmd.AddCommand(accountCmd)
-{{range .Services}}{{if .IsAccounts}}{{if not (in $excludes .KebabName) }}
-    accountCmd.AddCommand({{.SnakeName}}.Cmd){{end}}{{end}}{{end}}
+{{range .Services}}{{if .IsAccounts}}{{if not (in $excludes .KebabName) -}}
+    accountCmd.AddCommand({{.SnakeName}}.Cmd)
+    {{end}}{{end}}{{end}}
+
+    // Register commands with groups
+    {{range .Services}}{{if .IsAccounts}}{{if not (in $excludes .KebabName) -}}
+    {{.SnakeName}}.Cmd.GroupID = "{{ .Package.Name }}"
+    {{end}}{{end}}{{end}}
}

@ -1,6 +1,6 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.

-package cmd
+package workspace

{{$excludes := list "command-execution" "statement-execution" "dbfs" "dbsql-permissions"}}

@ -11,6 +11,12 @@ import (
)

func init() {
-{{range .Services}}{{if not .IsAccounts}}{{if not (in $excludes .KebabName) }}
-    root.RootCmd.AddCommand({{.SnakeName}}.Cmd){{end}}{{end}}{{end}}
+{{range .Services}}{{if not .IsAccounts}}{{if not (in $excludes .KebabName) -}}
+    root.RootCmd.AddCommand({{.SnakeName}}.Cmd)
+    {{end}}{{end}}{{end}}
+
+    // Register commands with groups
+    {{range .Services}}{{if not .IsAccounts}}{{if not (in $excludes .KebabName) -}}
+    {{.SnakeName}}.Cmd.GroupID = "{{ .Package.Name }}"
+    {{end}}{{end}}{{end}}
}

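Both templates now set a `GroupID` on each generated command. In cobra, a command's `GroupID` must match a group registered on the parent with `AddGroup`, after which the parent's help output lists the command under that group's title. A minimal sketch of the pattern (the group ID and titles here are illustrative, not from this diff):

```go
package main

import "github.com/spf13/cobra"

func main() {
    root := &cobra.Command{Use: "databricks"}

    // A command's GroupID must refer to a group registered on its parent,
    // otherwise cobra panics at execution time.
    root.AddGroup(&cobra.Group{ID: "compute", Title: "Compute"})

    clusters := &cobra.Command{Use: "clusters", GroupID: "compute"}
    root.AddCommand(clusters)

    root.Execute() // help output now lists `clusters` under "Compute"
}
```
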
@ -20,17 +20,31 @@ import (
{{define "service"}}
var Cmd = &cobra.Command{
    Use: "{{(.TrimPrefix "account").KebabName}}",
-    {{if .Description -}}
+    {{- if .Description }}
    Short: `{{.Summary | without "`"}}`,
    Long: `{{.Comment " " 80 | without "`"}}`,
-    {{- end}}
+    {{- end }}
+    Annotations: map[string]string{
+        "package": "{{ .Package.Name }}",
+    },
+    {{- if .IsPrivatePreview }}
+
+    // This service is being previewed; hide from help output.
+    Hidden: true,
+    {{- end }}
}

+{{- $serviceName := .KebabName -}}
{{range .Methods}}

+{{- $excludes := list "put-secret" -}}
+{{if in $excludes .KebabName }}
+{{continue}}
+{{end}}
// start {{.KebabName}} command

{{if .Request}}var {{.CamelName}}Req {{.Service.Package.Name}}.{{.Request.PascalName}}
-{{if not .Request.IsOnlyPrimitiveFields}}var {{.CamelName}}Json flags.JsonFlag{{end}}
+var {{.CamelName}}Json flags.JsonFlag
{{- end}}
{{if .Wait}}var {{.CamelName}}SkipWait bool
var {{.CamelName}}Timeout time.Duration{{end}}

@ -42,45 +56,74 @@ func init() {
    {{.CamelName}}Cmd.Flags().DurationVar(&{{.CamelName}}Timeout, "timeout", {{.Wait.Timeout}}*time.Minute, `maximum amount of time to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`)
    {{end -}}
    {{if .Request}}// TODO: short flags
-    {{if not .Request.IsOnlyPrimitiveFields}}{{.CamelName}}Cmd.Flags().Var(&{{.CamelName}}Json, "json", `either inline JSON string or @path/to/file.json with request body`){{end}}
+    {{.CamelName}}Cmd.Flags().Var(&{{.CamelName}}Json, "json", `either inline JSON string or @path/to/file.json with request body`)
    {{$method := .}}
+    {{ if not .IsJsonOnly }}
    {{range .Request.Fields -}}
    {{- if not .Required -}}
    {{if .Entity.IsObject }}// TODO: complex arg: {{.Name}}
    {{else if .Entity.IsAny }}// TODO: any: {{.Name}}
    {{else if .Entity.ArrayValue }}// TODO: array: {{.Name}}
    {{else if .Entity.MapValue }}// TODO: map via StringToStringVar: {{.Name}}
+    {{else if .Entity.IsEmpty }}// TODO: output-only field
    {{else if .Entity.Enum }}{{$method.CamelName}}Cmd.Flags().Var(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", `{{.Summary | without "`"}}`)
    {{else}}{{$method.CamelName}}Cmd.Flags().{{template "arg-type" .Entity}}(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", {{$method.CamelName}}Req.{{.PascalName}}, `{{.Summary | without "`"}}`)
    {{end}}
    {{- end -}}
    {{- end}}
+    {{- end}}
    {{end}}
}

-{{ $hasPosArgs := and .Request (or .Request.IsOnlyPrimitiveFields (eq .PascalName "RunNow")) -}}
+{{- $excludeFromPrompts := list "workspace get-status" -}}
+{{- $fullCommandName := (print $serviceName " " .KebabName) -}}
+{{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}
+
+{{ $hasPosArgs := and .Request (or .Request.IsAllRequiredFieldsPrimitive (eq .PascalName "RunNow")) -}}
{{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len .Request.RequiredFields)) -}}
-{{- $serviceHasNamedIdMap := and .Service.List .Service.List.NamedIdMap -}}
+{{- $serviceHasNamedIdMap := and (and .Service.List .Service.List.NamedIdMap) (not (eq .PascalName "List")) -}}
-{{- $hasIdPrompt := and $hasSinglePosArg $serviceHasNamedIdMap -}}
+{{- $hasIdPrompt := and (not $noPrompt) (and $hasSinglePosArg $serviceHasNamedIdMap) -}}
{{- $wait := and .Wait (and (not .IsCrudRead) (not (eq .SnakeName "get_run"))) -}}
+{{- $hasRequiredArgs := and (not $hasIdPrompt) $hasPosArgs -}}
var {{.CamelName}}Cmd = &cobra.Command{
    Use: "{{.KebabName}}{{if $hasPosArgs}}{{range .Request.RequiredFields}} {{.ConstantName}}{{end}}{{end}}",
-    {{if .Description -}}
+    {{- if .Description }}
    Short: `{{.Summary | without "`"}}`,
    Long: `{{.Comment " " 80 | without "`"}}`,
-    {{end}}
+    {{- end }}
-    Annotations: map[string]string{},{{if and (not $hasIdPrompt) $hasPosArgs }}
-    Args: cobra.ExactArgs({{len .Request.RequiredFields}}),{{end}}
+    {{- if .IsPrivatePreview }}
+
+    // This command is being previewed; hide from help output.
+    Hidden: true,
+    {{- end }}
+
+    Annotations: map[string]string{},{{if $hasRequiredArgs }}
+    Args: func(cmd *cobra.Command, args []string) error {
+        check := cobra.ExactArgs({{len .Request.RequiredFields}})
+        if cmd.Flags().Changed("json") {
+            check = cobra.ExactArgs(0)
+        }
+        return check(cmd, args)
+    },{{end}}
    PreRunE: root.Must{{if .Service.IsAccounts}}Account{{else}}Workspace{{end}}Client,
    RunE: func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        {{if .Service.IsAccounts}}a := root.AccountClient(ctx){{else}}w := root.WorkspaceClient(ctx){{end}}
-        {{- if .Request -}}
+        {{- if .Request }}
-        {{if $hasIdPrompt}}
-        if len(args) == 0 {
-            names, err := {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.Service.List.NamedIdMap.PascalName}}(ctx{{if .Service.List.Request}}, {{.Service.Package.Name}}.{{.Service.List.Request.PascalName}}{}{{end}})
+        if cmd.Flags().Changed("json") {
+            err = {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req)
            if err != nil {
                return err
            }
+        } else {
+        {{- if $hasIdPrompt}}
+            if len(args) == 0 {
+                promptSpinner := cmdio.Spinner(ctx)
+                promptSpinner <- "No{{range .Request.RequiredFields}} {{.ConstantName}}{{end}} argument specified. Loading names for {{.Service.TitleName}} drop-down."
+                names, err := {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.Service.List.NamedIdMap.PascalName}}(ctx{{if .Service.List.Request}}, {{.Service.Package.Name}}.{{.Service.List.Request.PascalName}}{}{{end}})
+                close(promptSpinner)
+                if err != nil {
+                    return fmt.Errorf("failed to load names for {{.Service.TitleName}} drop-down. Please manually specify required arguments. Original error: %w", err)
+                }
            id, err := cmdio.Select(ctx, names, "{{range .Request.RequiredFields}}{{.Summary | trimSuffix "."}}{{end}}")
            if err != nil {
                return err

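The new `Args` validator above relaxes the positional-argument check when the whole request body is supplied via `--json`. A minimal standalone sketch of the same pattern with cobra (the command name and flag wiring here are illustrative, not generated code from this diff):

```go
package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

func main() {
    cmd := &cobra.Command{
        Use: "create NAME",
        // Require exactly one positional argument, unless the request
        // body is passed wholesale via --json.
        Args: func(cmd *cobra.Command, args []string) error {
            check := cobra.ExactArgs(1)
            if cmd.Flags().Changed("json") {
                check = cobra.ExactArgs(0)
            }
            return check(cmd, args)
        },
        RunE: func(cmd *cobra.Command, args []string) error {
            fmt.Println("args:", args)
            return nil
        },
    }
    cmd.Flags().String("json", "", "inline JSON request body")
    cmd.Execute()
}
```
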
@ -89,13 +132,10 @@ var {{.CamelName}}Cmd = &cobra.Command{
            }
            if len(args) != 1 {
                return fmt.Errorf("expected to have {{range .Request.RequiredFields}}{{.Summary | trimSuffix "." | lower}}{{end}}")
-            }{{end}}{{if not .Request.IsOnlyPrimitiveFields}}
-            err = {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req)
-            if err != nil {
-                return err
            }
        {{- end -}}
        {{$method := .}}
+        {{- if and .Request.IsAllRequiredFieldsPrimitive (not .IsJsonOnly) -}}
        {{- range $arg, $field := .Request.RequiredFields}}
        {{if not $field.Entity.IsString -}}
        _, err = fmt.Sscan(args[{{$arg}}], &{{$method.CamelName}}Req.{{$field.PascalName}})

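For non-string required fields, the generated command relies on `fmt.Sscan` to parse a positional argument into the typed request field, as the template above shows. A small self-contained illustration of that conversion (the field name is hypothetical):

```go
package main

import "fmt"

func main() {
    // Simulates parsing args[0] into a non-string request field,
    // exactly as the generated code does with fmt.Sscan.
    args := []string{"12345"}

    var clusterID int64
    if _, err := fmt.Sscan(args[0], &clusterID); err != nil {
        fmt.Println("invalid number:", err)
        return
    }
    fmt.Println("parsed:", clusterID) // parsed: 12345
}
```
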
@ -104,37 +144,44 @@ var {{.CamelName}}Cmd = &cobra.Command{
        }{{else -}}
        {{$method.CamelName}}Req.{{$field.PascalName}} = args[{{$arg}}]
        {{- end -}}{{end}}
+        {{- else -}}
+        return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+        {{- end -}}
+        }
        {{end}}
        {{if $wait -}}
+        wait, err := {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{.Service.PascalName}}.{{.PascalName}}(ctx{{if .Request}}, {{.CamelName}}Req{{end}})
+        if err != nil {
+            return err
+        }
        if {{.CamelName}}SkipWait {
-            {{template "method-call" .}}
+            {{if .Response -}}
+            return cmdio.Render(ctx, wait.Response)
+            {{- else -}}
+            return nil
+            {{- end}}
        }
        spinner := cmdio.Spinner(ctx)
-        info, err := {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{.Service.PascalName}}.{{.PascalName}}AndWait(ctx{{if .Request}}, {{.CamelName}}Req{{end}},
-            retries.Timeout[{{.Service.Package.Name}}.{{.Wait.Poll.Response.PascalName}}]({{.CamelName}}Timeout),
-            func(i *retries.Info[{{.Service.Package.Name}}.{{.Wait.Poll.Response.PascalName}}]) {
-                if i.Info == nil {
-                    return
-                }
+        info, err := wait.OnProgress(func(i *{{.Service.Package.Name}}.{{.Wait.Poll.Response.PascalName}}) {
            {{if .Wait.MessagePath -}}
            {{if .Wait.ComplexMessagePath -}}
-            if i.Info.{{.Wait.MessagePathHead.PascalName}} == nil {
+            if i.{{.Wait.MessagePathHead.PascalName}} == nil {
                return
            }
-            status := i.Info{{range .Wait.StatusPath}}.{{.PascalName}}{{end}}
+            status := i{{range .Wait.StatusPath}}.{{.PascalName}}{{end}}
            statusMessage := fmt.Sprintf("current status: %s", status)
-            if i.Info.{{.Wait.MessagePathHead.PascalName}} != nil {
+            if i.{{.Wait.MessagePathHead.PascalName}} != nil {
-                statusMessage = i.Info{{range .Wait.MessagePath}}.{{.PascalName}}{{end}}
+                statusMessage = i{{range .Wait.MessagePath}}.{{.PascalName}}{{end}}
            }
            {{- else -}}
-            statusMessage := i.Info{{range .Wait.MessagePath}}.{{.PascalName}}{{end}}
+            statusMessage := i{{range .Wait.MessagePath}}.{{.PascalName}}{{end}}
            {{- end}}
            {{- else -}}
-            status := i.Info{{range .Wait.StatusPath}}.{{.PascalName}}{{end}}
+            status := i{{range .Wait.StatusPath}}.{{.PascalName}}{{end}}
            statusMessage := fmt.Sprintf("current status: %s", status)
            {{- end}}
            spinner <- statusMessage
-        })
+        }).GetWithTimeout({{.CamelName}}Timeout)
        close(spinner)
        if err != nil {
            return err

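The old code built the polling loop by hand with `retries.Timeout`; the new code starts the operation once, then drives a waiter handle whose `OnProgress` callback receives the raw poll response and whose `GetWithTimeout` bounds the wait. A self-contained sketch of that waiter shape (all types and names below are illustrative, not the Databricks SDK's):

```go
package main

import (
    "errors"
    "fmt"
    "time"
)

// Waiter mimics the shape of a long-running-operation handle:
// a poll function plus a callback invoked on every intermediate state.
type Waiter[T any] struct {
    poll       func() (*T, bool, error)
    onProgress func(*T)
}

func (w *Waiter[T]) OnProgress(fn func(*T)) *Waiter[T] {
    w.onProgress = fn
    return w
}

func (w *Waiter[T]) GetWithTimeout(timeout time.Duration) (*T, error) {
    deadline := time.Now().Add(timeout)
    for time.Now().Before(deadline) {
        info, done, err := w.poll()
        if err != nil {
            return nil, err
        }
        if done {
            return info, nil
        }
        if w.onProgress != nil {
            w.onProgress(info)
        }
        time.Sleep(10 * time.Millisecond)
    }
    return nil, errors.New("timed out")
}

func main() {
    n := 0
    w := &Waiter[string]{poll: func() (*string, bool, error) {
        n++
        s := fmt.Sprintf("RUNNING (poll %d)", n)
        return &s, n >= 3, nil
    }}
    info, err := w.OnProgress(func(s *string) {
        fmt.Println("status:", *s) // plays the role of `spinner <- statusMessage`
    }).GetWithTimeout(time.Second)
    fmt.Println(*info, err)
}
```
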
@ -144,6 +191,9 @@ var {{.CamelName}}Cmd = &cobra.Command{
        {{template "method-call" .}}
        {{end -}}
    },
+    // Disable completions since they are not applicable.
+    // Can be overridden by manual implementation in `override.go`.
+    ValidArgsFunction: cobra.NoFileCompletions,
}
{{end}}
// end service {{.Name}}{{end}}

@ -1,3 +1,4 @@
+cmd/account/access-control/access-control.go linguist-generated=true
cmd/account/billable-usage/billable-usage.go linguist-generated=true
cmd/account/budgets/budgets.go linguist-generated=true
cmd/account/cmd.go linguist-generated=true

@ -13,7 +14,9 @@ cmd/account/networks/networks.go linguist-generated=true
cmd/account/o-auth-enrollment/o-auth-enrollment.go linguist-generated=true
cmd/account/private-access/private-access.go linguist-generated=true
cmd/account/published-app-integration/published-app-integration.go linguist-generated=true
+cmd/account/service-principal-secrets/service-principal-secrets.go linguist-generated=true
cmd/account/service-principals/service-principals.go linguist-generated=true
+cmd/account/settings/settings.go linguist-generated=true
cmd/account/storage-credentials/storage-credentials.go linguist-generated=true
cmd/account/storage/storage.go linguist-generated=true
cmd/account/users/users.go linguist-generated=true

@ -25,6 +28,7 @@ cmd/workspace/catalogs/catalogs.go linguist-generated=true
cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true
cmd/workspace/clusters/clusters.go linguist-generated=true
cmd/workspace/cmd.go linguist-generated=true
+cmd/workspace/connections/connections.go linguist-generated=true
cmd/workspace/current-user/current-user.go linguist-generated=true
cmd/workspace/dashboards/dashboards.go linguist-generated=true
cmd/workspace/data-sources/data-sources.go linguist-generated=true

@ -57,6 +61,7 @@ cmd/workspace/service-principals/service-principals.go linguist-generated=true
cmd/workspace/serving-endpoints/serving-endpoints.go linguist-generated=true
cmd/workspace/shares/shares.go linguist-generated=true
cmd/workspace/storage-credentials/storage-credentials.go linguist-generated=true
+cmd/workspace/system-schemas/system-schemas.go linguist-generated=true
cmd/workspace/table-constraints/table-constraints.go linguist-generated=true
cmd/workspace/tables/tables.go linguist-generated=true
cmd/workspace/token-management/token-management.go linguist-generated=true

@ -64,5 +69,6 @@ cmd/workspace/tokens/tokens.go linguist-generated=true
cmd/workspace/users/users.go linguist-generated=true
cmd/workspace/volumes/volumes.go linguist-generated=true
cmd/workspace/warehouses/warehouses.go linguist-generated=true
+cmd/workspace/workspace-bindings/workspace-bindings.go linguist-generated=true
cmd/workspace/workspace-conf/workspace-conf.go linguist-generated=true
cmd/workspace/workspace/workspace.go linguist-generated=true

103 CHANGELOG.md
@ -1,5 +1,108 @@
# Version changelog

+## 0.200.0
+
+This version marks the first version available as public preview.
+
+The minor bump to 200 better disambiguates between Databricks CLI "v1" (the Python version)
+and this version, Databricks CLI "v2". The minor version of 0.100 may look lower than 0.17
+to some, whereas 200 does not. This bump has no other significance.
+
+CLI:
+* Add filer.Filer implementation backed by the Files API ([#474](https://github.com/databricks/cli/pull/474)).
+* Add fs cp command ([#463](https://github.com/databricks/cli/pull/463)).
+* Correctly set ExactArgs if generated command has positional arguments ([#488](https://github.com/databricks/cli/pull/488)).
+* Do not use white color as string output ([#489](https://github.com/databricks/cli/pull/489)).
+* Update README to reflect public preview status ([#491](https://github.com/databricks/cli/pull/491)).
+
+Bundles:
+* Fix force flag not working for bundle destroy ([#434](https://github.com/databricks/cli/pull/434)).
+* Fix locker unlock for destroy ([#492](https://github.com/databricks/cli/pull/492)).
+* Use better error assertions and clean up locker API ([#490](https://github.com/databricks/cli/pull/490)).
+
+Dependencies:
+* Bump golang.org/x/mod from 0.10.0 to 0.11.0 ([#496](https://github.com/databricks/cli/pull/496)).
+* Bump golang.org/x/sync from 0.2.0 to 0.3.0 ([#495](https://github.com/databricks/cli/pull/495)).
+
+## 0.100.4
+
+CLI:
+* Add workspace import-dir command ([#456](https://github.com/databricks/cli/pull/456)).
+* Annotate generated commands with OpenAPI package name ([#466](https://github.com/databricks/cli/pull/466)).
+* Associate generated commands with command groups ([#475](https://github.com/databricks/cli/pull/475)).
+* Disable shell completions for generated commands ([#483](https://github.com/databricks/cli/pull/483)).
+* Include [DEFAULT] section header when writing ~/.databrickscfg ([#464](https://github.com/databricks/cli/pull/464)).
+* Pass through proxy related environment variables ([#465](https://github.com/databricks/cli/pull/465)).
+* Restore flags to original values on test completion ([#470](https://github.com/databricks/cli/pull/470)).
+* Update configure command ([#482](https://github.com/databricks/cli/pull/482)).
+
+Dependencies:
+* Bump SDK to latest ([#473](https://github.com/databricks/cli/pull/473)).
+
+## 0.100.3
+
+CLI:
+* Add directory tracking to sync ([#425](https://github.com/databricks/cli/pull/425)).
+* Add fs cat command for dbfs files ([#430](https://github.com/databricks/cli/pull/430)).
+* Add fs ls command for dbfs ([#429](https://github.com/databricks/cli/pull/429)).
+* Add fs mkdirs command for dbfs ([#432](https://github.com/databricks/cli/pull/432)).
+* Add fs rm command for dbfs ([#433](https://github.com/databricks/cli/pull/433)).
+* Add installation instructions ([#458](https://github.com/databricks/cli/pull/458)).
+* Add new line to cmdio JSON rendering ([#443](https://github.com/databricks/cli/pull/443)).
+* Add profile on `databricks auth login` ([#423](https://github.com/databricks/cli/pull/423)).
+* Add readable console logger ([#370](https://github.com/databricks/cli/pull/370)).
+* Add workspace export-dir command ([#449](https://github.com/databricks/cli/pull/449)).
+* Added secrets input prompt for secrets put-secret command ([#413](https://github.com/databricks/cli/pull/413)).
+* Added spinner when loading command prompts ([#420](https://github.com/databricks/cli/pull/420)).
+* Better error message if can not load prompts ([#437](https://github.com/databricks/cli/pull/437)).
+* Changed service template to correctly handle required positional arguments ([#405](https://github.com/databricks/cli/pull/405)).
+* Do not generate prompts for certain commands ([#438](https://github.com/databricks/cli/pull/438)).
+* Do not prompt for List methods ([#411](https://github.com/databricks/cli/pull/411)).
+* Do not use FgWhite and FgBlack for terminal output ([#435](https://github.com/databricks/cli/pull/435)).
+* Skip path translation of job task for jobs with a Git source ([#404](https://github.com/databricks/cli/pull/404)).
+* Tweak profile prompt ([#454](https://github.com/databricks/cli/pull/454)).
+* Update with the latest Go SDK ([#457](https://github.com/databricks/cli/pull/457)).
+* Use cmdio in version command for `--output` flag ([#419](https://github.com/databricks/cli/pull/419)).
+
+Bundles:
+* Check for nil environment before accessing it ([#453](https://github.com/databricks/cli/pull/453)).
+
+Dependencies:
+* Bump github.com/hashicorp/terraform-json from 0.16.0 to 0.17.0 ([#459](https://github.com/databricks/cli/pull/459)).
+* Bump github.com/mattn/go-isatty from 0.0.18 to 0.0.19 ([#412](https://github.com/databricks/cli/pull/412)).
+
+Internal:
+* Add Mkdir and ReadDir functions to filer.Filer interface ([#414](https://github.com/databricks/cli/pull/414)).
+* Add Stat function to filer.Filer interface ([#421](https://github.com/databricks/cli/pull/421)).
+* Add check for path is a directory in filer.ReadDir ([#426](https://github.com/databricks/cli/pull/426)).
+* Add fs.FS adapter for the filer interface ([#422](https://github.com/databricks/cli/pull/422)).
+* Add implementation of filer.Filer for local filesystem ([#460](https://github.com/databricks/cli/pull/460)).
+* Allow equivalence checking of filer errors to fs errors ([#416](https://github.com/databricks/cli/pull/416)).
+* Fix locker integration test ([#417](https://github.com/databricks/cli/pull/417)).
+* Implement DBFS filer ([#139](https://github.com/databricks/cli/pull/139)).
+* Include recursive deletion in filer interface ([#442](https://github.com/databricks/cli/pull/442)).
+* Make filer.Filer return fs.DirEntry from ReadDir ([#415](https://github.com/databricks/cli/pull/415)).
+* Speed up sync integration tests ([#428](https://github.com/databricks/cli/pull/428)).
+
+## 0.100.2
+
+CLI:
+* Reduce parallelism in locker integration test ([#407](https://github.com/databricks/bricks/pull/407)).
+
+Bundles:
+* Don't pass synthesized TMPDIR if not already set ([#409](https://github.com/databricks/bricks/pull/409)).
+* Added support for bundle.Seq, simplified Mutator.Apply interface ([#403](https://github.com/databricks/bricks/pull/403)).
+* Regenerated internal schema structs based on Terraform provider schemas ([#401](https://github.com/databricks/bricks/pull/401)).
+
+## 0.100.1
+
+CLI:
+* Sync: Gracefully handle broken notebook files ([#398](https://github.com/databricks/cli/pull/398)).
+* Add version flag to print version and exit ([#394](https://github.com/databricks/cli/pull/394)).
+* Pass temporary directory environment variables to subprocesses ([#395](https://github.com/databricks/cli/pull/395)).
+* Rename environment variables `BRICKS_` -> `DATABRICKS_` ([#393](https://github.com/databricks/cli/pull/393)).
+* Update to Go SDK v0.9.0 ([#396](https://github.com/databricks/cli/pull/396)).
+
## 0.100.0

This release bumps the minor version to 100 to disambiguate between Databricks CLI "v1" (the Python version)

@ -0,0 +1,25 @@
+DB license
+
+Copyright (2022) Databricks, Inc.
+
+Definitions.
+
+Agreement: The agreement between Databricks, Inc., and you governing the use of the Databricks Services, which shall be, with respect to Databricks, the Databricks Terms of Service located at www.databricks.com/termsofservice, and with respect to Databricks Community Edition, the Community Edition Terms of Service located at www.databricks.com/ce-termsofuse, in each case unless you have entered into a separate written agreement with Databricks governing the use of the applicable Databricks Services.
+
+Software: The source code and object code to which this license applies.
+
+Scope of Use. You may not use this Software except in connection with your use of the Databricks Services pursuant to the Agreement. Your use of the Software must comply at all times with any restrictions applicable to the Databricks Services, generally, and must be used in accordance with any applicable documentation. You may view, use, copy, modify, publish, and/or distribute the Software solely for the purposes of using the code within or connecting to the Databricks Services. If you do not agree to these terms, you may not view, use, copy, modify, publish, and/or distribute the Software.
+
+Redistribution. You may redistribute and sublicense the Software so long as all use is in compliance with these terms. In addition:
+
+You must give any other recipients a copy of this License;
+You must cause any modified files to carry prominent notices stating that you changed the files;
+You must retain, in the source code form of any derivative works that you distribute, all copyright, patent, trademark, and attribution notices from the source code form, excluding those notices that do not pertain to any part of the derivative works; and
+If the source code form includes a "NOTICE" text file as part of its distribution, then any derivative works that you distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the derivative works.
+You may add your own copyright statement to your modifications and may provide additional license terms and conditions for use, reproduction, or distribution of your modifications, or for any such derivative works as a whole, provided your use, reproduction, and distribution of the Software otherwise complies with the conditions stated in this License.
+
+Termination. This license terminates automatically upon your breach of these terms or upon the termination of your Agreement. Additionally, Databricks may terminate this license at any time on notice. Upon termination, you must permanently delete the Software and all copies thereof.
+
+DISCLAIMER; LIMITATION OF LIABILITY.
+
+THE SOFTWARE IS PROVIDED “AS-IS” AND WITH ALL FAULTS. DATABRICKS, ON BEHALF OF ITSELF AND ITS LICENSORS, SPECIFICALLY DISCLAIMS ALL WARRANTIES RELATING TO THE SOURCE CODE, EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, IMPLIED WARRANTIES, CONDITIONS AND OTHER TERMS OF MERCHANTABILITY, SATISFACTORY QUALITY OR FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. DATABRICKS AND ITS LICENSORS TOTAL AGGREGATE LIABILITY RELATING TO OR ARISING OUT OF YOUR USE OF OR DATABRICKS’ PROVISIONING OF THE SOURCE CODE SHALL BE LIMITED TO ONE THOUSAND ($1,000) DOLLARS. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@ -0,0 +1,100 @@
+Copyright (2023) Databricks, Inc.
+
+This Software includes software developed at Databricks (https://www.databricks.com/) and its use is subject to the included LICENSE file.
+
+This Software contains code from the following open source projects, licensed under the Apache 2.0 license:
+
+spf13/cobra - https://github.com/spf13/cobra
+Copyright cobra authors
+License - https://github.com/spf13/cobra/blob/main/LICENSE.txt
+
+briandowns/spinner - https://github.com/briandowns/spinner
+Copyright 2022 Brian J. Downs
+License - https://github.com/briandowns/spinner/blob/master/LICENSE
+
+go-ini/ini - https://github.com/go-ini/ini
+Copyright ini authors
+License - https://github.com/go-ini/ini/blob/main/LICENSE
+
+---
+
+This software contains code from the following open source projects, licensed under the MPL 2.0 license:
+
+hashicorp/go-version - https://github.com/hashicorp/go-version
+Copyright 2014 HashiCorp, Inc.
+License - https://github.com/hashicorp/go-version/blob/main/LICENSE
+
+hashicorp/hc-install - https://github.com/hashicorp/hc-install
+Copyright 2020 HashiCorp, Inc.
+License - https://github.com/hashicorp/hc-install/blob/main/LICENSE
+
+hashicorp/terraform-exec - https://github.com/hashicorp/terraform-exec
+Copyright 2020 HashiCorp, Inc.
+License - https://github.com/hashicorp/terraform-exec/blob/main/LICENSE
+
+hashicorp/terraform-json - https://github.com/hashicorp/terraform-json
+Copyright 2019 HashiCorp, Inc.
+License - https://github.com/hashicorp/terraform-json/blob/main/LICENSE
+
+---
+
+This software contains code from the following open source projects, licensed under the BSD (2-clause) license:
+
+pkg/browser - https://github.com/pkg/browser
+Copyright (c) 2014, Dave Cheney <dave@cheney.net>
+License - https://github.com/pkg/browser/blob/master/LICENSE
+
+---
+
+This software contains code from the following open source projects, licensed under the BSD (3-clause) license:
+
+spf13/pflag - https://github.com/spf13/pflag
+Copyright (c) 2012 Alex Ogier. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+License - https://raw.githubusercontent.com/spf13/pflag/master/LICENSE
+
+google/uuid - https://github.com/google/uuid
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+License - https://github.com/google/uuid/blob/master/LICENSE
+
+imdario/mergo - https://github.com/imdario/mergo
+Copyright (c) 2013 Dario Castañé. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+License - https://github.com/imdario/mergo/blob/master/LICENSE
+
+manifoldco/promptui - https://github.com/manifoldco/promptui
+Copyright (c) 2017, Arigato Machine Inc. All rights reserved.
+License - https://github.com/manifoldco/promptui/blob/master/LICENSE.md
+
+---
+
+This Software contains code from the following open source projects, licensed under the MIT license:
+
+fatih/color - https://github.com/fatih/color
+Copyright (c) 2013 Fatih Arslan
+License - https://github.com/fatih/color/blob/main/LICENSE.md
+
+ghodss/yaml - https://github.com/ghodss/yaml
+Copyright (c) 2014 Sam Ghods
+License - https://github.com/ghodss/yaml/blob/master/LICENSE
+
+mattn/go-isatty - https://github.com/mattn/go-isatty
+Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
+License - https://github.com/mattn/go-isatty/blob/master/LICENSE
+
+nwidger/jsoncolor - https://github.com/nwidger/jsoncolor
+Copyright (c) 2016 Niels Widger
+License - https://github.com/nwidger/jsoncolor/blob/master/LICENSE
+
+sabhiram/go-gitignore - https://github.com/sabhiram/go-gitignore
+Copyright (c) 2015 Shaba Abhiram
+License - https://github.com/sabhiram/go-gitignore/blob/master/LICENSE
+
+stretchr/testify - https://github.com/stretchr/testify
+Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
+License - https://github.com/stretchr/testify/blob/master/LICENSE
+
+whilp/git-urls - https://github.com/whilp/git-urls
+Copyright (c) 2020 Will Maier
+License - https://github.com/whilp/git-urls/blob/master/LICENSE

11 README.md
@ -2,19 +2,18 @@
[![build](https://github.com/databricks/cli/workflows/build/badge.svg?branch=main)](https://github.com/databricks/cli/actions?query=workflow%3Abuild+branch%3Amain)

-This project is in private preview.
+This project is in public preview.

Documentation about the full REST API coverage is available in the [docs folder](docs/commands.md).

-Documentation is available at https://docs.databricks.com/dev-tools/cli/bricks-cli.html.
+Documentation is available at https://docs.databricks.com/dev-tools/cli/databricks-cli.html.

## Installation

This CLI is packaged as a dependency-free binary executable and may be located in any directory.
-For convenient access, copy the `databricks` binary to any directory listed in `$PATH`.
-Confirm the binary works by executing `databricks version`.
+See https://github.com/databricks/cli/releases for releases and
+[the docs pages](https://docs.databricks.com/dev-tools/cli/databricks-cli.html) for
+installation instructions.

## Authentication

@ -20,7 +20,7 @@ func (m *all) Name() string {
    return fmt.Sprintf("artifacts.%sAll", m.name)
}

-func (m *all) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *all) Apply(ctx context.Context, b *bundle.Bundle) error {
    var out []bundle.Mutator

    // Iterate with stable ordering.

@ -30,12 +30,12 @@ func (m *all) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, er
    for _, name := range keys {
        m, err := m.fn(name)
        if err != nil {
-            return nil, err
+            return err
        }
        if m != nil {
            out = append(out, m)
        }
    }

-    return out, nil
+    return bundle.Apply(ctx, b, bundle.Seq(out...))
}

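This hunk shows the crux of the refactor that repeats across the mutator files below: `Apply` no longer returns follow-up mutators for the framework to schedule; it returns only an `error` and composes children itself through `bundle.Apply` and `bundle.Seq`. A minimal self-contained sketch of that pattern, using toy types rather than the real `bundle` package:

```go
package main

import (
    "context"
    "fmt"
)

type Bundle struct{ Log []string }

// Mutator is the post-refactor interface: Apply returns only an error.
type Mutator interface {
    Name() string
    Apply(ctx context.Context, b *Bundle) error
}

// Seq composes mutators into one that applies them in order.
type seq struct{ ms []Mutator }

func Seq(ms ...Mutator) Mutator { return &seq{ms} }

func (s *seq) Name() string { return "seq" }

func (s *seq) Apply(ctx context.Context, b *Bundle) error {
    for _, m := range s.ms {
        if err := Apply(ctx, b, m); err != nil {
            return err
        }
    }
    return nil
}

// Apply records the mutator name, then runs it.
func Apply(ctx context.Context, b *Bundle, m Mutator) error {
    b.Log = append(b.Log, m.Name())
    return m.Apply(ctx, b)
}

type step string

func (s step) Name() string                         { return string(s) }
func (s step) Apply(context.Context, *Bundle) error { return nil }

func main() {
    b := &Bundle{}
    // As in artifacts.(*all).Apply: collect children, then run them as a sequence.
    out := []Mutator{step("build"), step("upload")}
    if err := Apply(context.Background(), b, Seq(out...)); err != nil {
        fmt.Println("error:", err)
    }
    fmt.Println(b.Log) // [seq build upload]
}
```
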
@ -27,15 +27,15 @@ func (m *build) Name() string {
    return fmt.Sprintf("artifacts.Build(%s)", m.name)
}

-func (m *build) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error {
    artifact, ok := b.Config.Artifacts[m.name]
    if !ok {
-        return nil, fmt.Errorf("artifact doesn't exist: %s", m.name)
+        return fmt.Errorf("artifact doesn't exist: %s", m.name)
    }

    if artifact.Notebook != nil {
-        return []bundle.Mutator{notebook.Build(m.name)}, nil
+        return bundle.Apply(ctx, b, notebook.Build(m.name))
    }

-    return nil, nil
+    return nil
}

@ -27,10 +27,10 @@ func (m *build) Name() string {
    return fmt.Sprintf("notebook.Build(%s)", m.name)
}

-func (m *build) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *build) Apply(_ context.Context, b *bundle.Bundle) error {
    a, ok := b.Config.Artifacts[m.name]
    if !ok {
-        return nil, fmt.Errorf("artifact doesn't exist: %s", m.name)
+        return fmt.Errorf("artifact doesn't exist: %s", m.name)
    }

    artifact := a.Notebook

@ -44,35 +44,35 @@ func (m *build) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, er
    case ".sql":
        artifact.Language = workspace.LanguageSql
    default:
-        return nil, fmt.Errorf("invalid notebook extension: %s", ext)
+        return fmt.Errorf("invalid notebook extension: %s", ext)
    }

    // Open underlying file.
    f, err := os.Open(filepath.Join(b.Config.Path, artifact.Path))
    if err != nil {
-        return nil, fmt.Errorf("unable to open artifact file %s: %w", artifact.Path, errors.Unwrap(err))
+        return fmt.Errorf("unable to open artifact file %s: %w", artifact.Path, errors.Unwrap(err))
    }
    defer f.Close()

    // Check that the file contains the notebook marker on its first line.
    ok, err = hasMarker(artifact.Language, f)
    if err != nil {
-        return nil, fmt.Errorf("unable to read artifact file %s: %s", artifact.Path, errors.Unwrap(err))
+        return fmt.Errorf("unable to read artifact file %s: %s", artifact.Path, errors.Unwrap(err))
    }
    if !ok {
-        return nil, fmt.Errorf("notebook marker not found in %s", artifact.Path)
+        return fmt.Errorf("notebook marker not found in %s", artifact.Path)
    }

    // Check that an artifact path is defined.
    remotePath := b.Config.Workspace.ArtifactsPath
    if remotePath == "" {
-        return nil, fmt.Errorf("remote artifact path not configured")
+        return fmt.Errorf("remote artifact path not configured")
    }

    // Store absolute paths.
    artifact.LocalPath = filepath.Join(b.Config.Path, artifact.Path)
    artifact.RemotePath = path.Join(remotePath, stripExtension(artifact.Path))
-    return nil, nil
+    return nil
}

func stripExtension(path string) string {

@ -26,35 +26,35 @@ func (m *upload) Name() string {
    return fmt.Sprintf("notebook.Upload(%s)", m.name)
}

-func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error {
    a, ok := b.Config.Artifacts[m.name]
    if !ok {
-        return nil, fmt.Errorf("artifact doesn't exist: %s", m.name)
+        return fmt.Errorf("artifact doesn't exist: %s", m.name)
    }

    artifact := a.Notebook
    raw, err := os.ReadFile(artifact.LocalPath)
    if err != nil {
-        return nil, fmt.Errorf("unable to read %s: %w", m.name, errors.Unwrap(err))
+        return fmt.Errorf("unable to read %s: %w", m.name, errors.Unwrap(err))
    }

    // Make sure target directory exists.
    err = b.WorkspaceClient().Workspace.MkdirsByPath(ctx, path.Dir(artifact.RemotePath))
    if err != nil {
-        return nil, fmt.Errorf("unable to create directory for %s: %w", m.name, err)
+        return fmt.Errorf("unable to create directory for %s: %w", m.name, err)
    }

    // Import to workspace.
    err = b.WorkspaceClient().Workspace.Import(ctx, workspace.Import{
        Path: artifact.RemotePath,
        Overwrite: true,
-        Format: workspace.ExportFormatSource,
+        Format: workspace.ImportFormatSource,
        Language: artifact.Language,
        Content: base64.StdEncoding.EncodeToString(raw),
    })
    if err != nil {
-        return nil, fmt.Errorf("unable to import %s: %w", m.name, err)
+        return fmt.Errorf("unable to import %s: %w", m.name, err)
    }

-    return nil, nil
+    return nil
}

@ -27,15 +27,15 @@ func (m *upload) Name() string {
    return fmt.Sprintf("artifacts.Upload(%s)", m.name)
}

-func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error {
    artifact, ok := b.Config.Artifacts[m.name]
    if !ok {
-        return nil, fmt.Errorf("artifact doesn't exist: %s", m.name)
+        return fmt.Errorf("artifact doesn't exist: %s", m.name)
    }

    if artifact.Notebook != nil {
-        return []bundle.Mutator{notebook.Upload(m.name)}, nil
+        return bundle.Apply(ctx, b, notebook.Upload(m.name))
    }

-    return nil, nil
+    return nil
}

@ -91,8 +91,6 @@ func (b *Bundle) WorkspaceClient() *databricks.WorkspaceClient {
    return b.client
}

-var cacheDirName = filepath.Join(".databricks", "bundle")
-
// CacheDir returns directory to use for temporary files for this bundle.
// Scoped to the bundle's environment.
func (b *Bundle) CacheDir(paths ...string) (string, error) {

@ -100,11 +98,20 @@ func (b *Bundle) CacheDir(paths ...string) (string, error) {
        panic("environment not set")
    }

-    // Fixed components of the result path.
-    parts := []string{
+    cacheDirName, exists := os.LookupEnv("DATABRICKS_BUNDLE_TMP")
+    if !exists || cacheDirName == "" {
+        cacheDirName = filepath.Join(
            // Anchor at bundle root directory.
            b.Config.Path,
            // Static cache directory.
+            ".databricks",
+            "bundle",
+        )
+    }
+
+    // Fixed components of the result path.
+    parts := []string{
        cacheDirName,
        // Scope with environment name.
        b.Config.Bundle.Environment,

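The new `CacheDir` logic prefers a `DATABRICKS_BUNDLE_TMP` override and falls back to `<bundle root>/.databricks/bundle`. A standalone sketch of just that lookup, assuming only what the hunk above shows (the helper name is mine):

```go
package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// cacheDirBase returns the base directory for bundle temp files:
// the DATABRICKS_BUNDLE_TMP override if set, else <root>/.databricks/bundle.
func cacheDirBase(root string) string {
    dir, ok := os.LookupEnv("DATABRICKS_BUNDLE_TMP")
    if !ok || dir == "" {
        dir = filepath.Join(root, ".databricks", "bundle")
    }
    return dir
}

func main() {
    fmt.Println(cacheDirBase("/tmp/project")) // /tmp/project/.databricks/bundle
    os.Setenv("DATABRICKS_BUNDLE_TMP", "/tmp/bundle-tmp")
    fmt.Println(cacheDirBase("/tmp/project")) // /tmp/bundle-tmp
}
```
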
@ -3,7 +3,6 @@ package bundle
import (
    "os"
    "path/filepath"
-    "strings"
    "testing"

    "github.com/stretchr/testify/assert"

@ -35,9 +34,38 @@ func TestBundleCacheDir(t *testing.T) {
    // This is otherwise done by [mutators.SelectEnvironment].
    bundle.Config.Bundle.Environment = "default"

+    // unset env variable in case it's set
+    t.Setenv("DATABRICKS_BUNDLE_TMP", "")
+
    cacheDir, err := bundle.CacheDir()

+    // format is <CWD>/.databricks/bundle/<environment>
    assert.NoError(t, err)
-    assert.True(t, strings.HasPrefix(cacheDir, projectDir))
+    assert.Equal(t, filepath.Join(projectDir, ".databricks", "bundle", "default"), cacheDir)
+}
+
+func TestBundleCacheDirOverride(t *testing.T) {
+    projectDir := t.TempDir()
+    bundleTmpDir := t.TempDir()
+    f1, err := os.Create(filepath.Join(projectDir, "bundle.yml"))
+    require.NoError(t, err)
+    f1.Close()
+
+    bundle, err := Load(projectDir)
+    require.NoError(t, err)
+
+    // Artificially set environment.
+    // This is otherwise done by [mutators.SelectEnvironment].
+    bundle.Config.Bundle.Environment = "default"
+
+    // now we expect to use 'bundleTmpDir' instead of CWD/.databricks/bundle
+    t.Setenv("DATABRICKS_BUNDLE_TMP", bundleTmpDir)
+
+    cacheDir, err := bundle.CacheDir()
+
+    // format is <DATABRICKS_BUNDLE_TMP>/<environment>
+    assert.NoError(t, err)
+    assert.Equal(t, filepath.Join(bundleTmpDir, "default"), cacheDir)
}

func TestBundleMustLoadSuccess(t *testing.T) {

@ -247,6 +247,6 @@ func (m *interpolate) Name() string {
    return "Interpolate"
}

-func (m *interpolate) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *interpolate) Apply(_ context.Context, b *bundle.Bundle) error {
-    return nil, m.expand(&b.Config)
+    return m.expand(&b.Config)
}

@ -24,14 +24,14 @@ func (m *defineDefaultEnvironment) Name() string {
    return fmt.Sprintf("DefineDefaultEnvironment(%s)", m.name)
}

-func (m *defineDefaultEnvironment) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *defineDefaultEnvironment) Apply(_ context.Context, b *bundle.Bundle) error {
    // Nothing to do if the configuration has at least 1 environment.
    if len(b.Config.Environments) > 0 {
-        return nil, nil
+        return nil
    }

    // Define default environment.
    b.Config.Environments = make(map[string]*config.Environment)
    b.Config.Environments[m.name] = &config.Environment{}
-    return nil, nil
+    return nil
}

@ -13,7 +13,7 @@ import (
func TestDefaultEnvironment(t *testing.T) {
    bundle := &bundle.Bundle{}
-    _, err := mutator.DefineDefaultEnvironment().Apply(context.Background(), bundle)
+    err := mutator.DefineDefaultEnvironment().Apply(context.Background(), bundle)
    require.NoError(t, err)
    env, ok := bundle.Config.Environments["default"]
    assert.True(t, ok)

@ -28,7 +28,7 @@ func TestDefaultEnvironmentAlreadySpecified(t *testing.T) {
            },
        },
    }
-    _, err := mutator.DefineDefaultEnvironment().Apply(context.Background(), bundle)
+    err := mutator.DefineDefaultEnvironment().Apply(context.Background(), bundle)
    require.NoError(t, err)
    _, ok := bundle.Config.Environments["default"]
    assert.False(t, ok)

@ -28,9 +28,9 @@ func (m *defineDefaultInclude) Name() string {
    return "DefineDefaultInclude"
}

-func (m *defineDefaultInclude) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *defineDefaultInclude) Apply(_ context.Context, b *bundle.Bundle) error {
    if len(b.Config.Include) == 0 {
        b.Config.Include = slices.Clone(m.include)
    }
-    return nil, nil
+    return nil
}

@ -12,7 +12,7 @@ import (
func TestDefaultInclude(t *testing.T) {
    bundle := &bundle.Bundle{}
-    _, err := mutator.DefineDefaultInclude().Apply(context.Background(), bundle)
+    err := mutator.DefineDefaultInclude().Apply(context.Background(), bundle)
    require.NoError(t, err)
    assert.Equal(t, []string{"*.yml", "*/*.yml"}, bundle.Config.Include)
}

@ -19,10 +19,10 @@ func (m *defineDefaultWorkspacePaths) Name() string {
    return "DefaultWorkspacePaths"
}

-func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundle) error {
    root := b.Config.Workspace.RootPath
    if root == "" {
-        return nil, fmt.Errorf("unable to define default workspace paths: workspace root not defined")
+        return fmt.Errorf("unable to define default workspace paths: workspace root not defined")
    }

    if b.Config.Workspace.FilesPath == "" {

@ -37,5 +37,5 @@ func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundl
        b.Config.Workspace.StatePath = path.Join(root, "state")
    }

-    return nil, nil
+    return nil
}

@ -19,7 +19,7 @@ func TestDefineDefaultWorkspacePaths(t *testing.T) {
            },
        },
    }
-    _, err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), bundle)
+    err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), bundle)
    require.NoError(t, err)
    assert.Equal(t, "/files", bundle.Config.Workspace.FilesPath)
    assert.Equal(t, "/artifacts", bundle.Config.Workspace.ArtifactsPath)

@ -37,7 +37,7 @@ func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
_, err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), bundle)
|
err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), bundle)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, "/foo/bar", bundle.Config.Workspace.FilesPath)
|
assert.Equal(t, "/foo/bar", bundle.Config.Workspace.FilesPath)
|
||||||
assert.Equal(t, "/foo/bar", bundle.Config.Workspace.ArtifactsPath)
|
assert.Equal(t, "/foo/bar", bundle.Config.Workspace.ArtifactsPath)
|
||||||
|
|
|
@@ -18,17 +18,17 @@ func (m *defineDefaultWorkspaceRoot) Name() string {
 	return "DefineDefaultWorkspaceRoot"
 }
 
-func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) error {
 	if b.Config.Workspace.RootPath != "" {
-		return nil, nil
+		return nil
 	}
 
 	if b.Config.Bundle.Name == "" {
-		return nil, fmt.Errorf("unable to define default workspace root: bundle name not defined")
+		return fmt.Errorf("unable to define default workspace root: bundle name not defined")
 	}
 
 	if b.Config.Bundle.Environment == "" {
-		return nil, fmt.Errorf("unable to define default workspace root: bundle environment not selected")
+		return fmt.Errorf("unable to define default workspace root: bundle environment not selected")
 	}
 
 	b.Config.Workspace.RootPath = fmt.Sprintf(
@@ -36,5 +36,5 @@ func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle
 		b.Config.Bundle.Name,
 		b.Config.Bundle.Environment,
 	)
-	return nil, nil
+	return nil
 }

@@ -20,7 +20,7 @@ func TestDefaultWorkspaceRoot(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.DefineDefaultWorkspaceRoot().Apply(context.Background(), bundle)
+	err := mutator.DefineDefaultWorkspaceRoot().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 	assert.Equal(t, "~/.bundle/name/environment", bundle.Config.Workspace.RootPath)
 }

@@ -20,15 +20,15 @@ func (m *expandWorkspaceRoot) Name() string {
 	return "ExpandWorkspaceRoot"
 }
 
-func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) error {
 	root := b.Config.Workspace.RootPath
 	if root == "" {
-		return nil, fmt.Errorf("unable to expand workspace root: workspace root not defined")
+		return fmt.Errorf("unable to expand workspace root: workspace root not defined")
 	}
 
 	currentUser := b.Config.Workspace.CurrentUser
 	if currentUser == nil || currentUser.UserName == "" {
-		return nil, fmt.Errorf("unable to expand workspace root: current user not set")
+		return fmt.Errorf("unable to expand workspace root: current user not set")
 	}
 
 	if strings.HasPrefix(root, "~/") {
@@ -36,5 +36,5 @@ func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) ([]bu
 		b.Config.Workspace.RootPath = path.Join(home, root[2:])
 	}
 
-	return nil, nil
+	return nil
 }

@@ -23,7 +23,7 @@ func TestExpandWorkspaceRoot(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
+	err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 	assert.Equal(t, "/Users/jane@doe.com/foo", bundle.Config.Workspace.RootPath)
 }
@@ -39,7 +39,7 @@ func TestExpandWorkspaceRootDoesNothing(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
+	err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 	assert.Equal(t, "/Users/charly@doe.com/foo", bundle.Config.Workspace.RootPath)
 }
@@ -54,7 +54,7 @@ func TestExpandWorkspaceRootWithoutRoot(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
+	err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
 	require.Error(t, err)
 }
 
@@ -66,6 +66,6 @@ func TestExpandWorkspaceRootWithoutCurrentUser(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
+	err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
 	require.Error(t, err)
 }

@@ -18,11 +18,11 @@ func (m *loadGitDetails) Name() string {
 	return "LoadGitDetails"
 }
 
-func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error {
 	// Load relevant git repository
 	repo, err := git.NewRepository(b.Config.Path)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// load branch name if undefined
 	if b.Config.Bundle.Git.Branch == "" {
@@ -47,5 +47,5 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.
 		remoteUrl := repo.OriginUrl()
 		b.Config.Bundle.Git.OriginURL = remoteUrl
 	}
-	return nil, nil
+	return nil
 }

@@ -17,13 +17,13 @@ func (m *populateCurrentUser) Name() string {
 	return "PopulateCurrentUser"
 }
 
-func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error {
 	w := b.WorkspaceClient()
 	me, err := w.CurrentUser.Me(ctx)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	b.Config.Workspace.CurrentUser = me
-	return nil, nil
+	return nil
 }

@@ -25,10 +25,10 @@ func (m *processInclude) Name() string {
 	return fmt.Sprintf("ProcessInclude(%s)", m.relPath)
 }
 
-func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) error {
 	this, err := config.Load(m.fullPath)
 	if err != nil {
-		return nil, err
+		return err
 	}
-	return nil, b.Config.Merge(this)
+	return b.Config.Merge(this)
 }

@@ -32,7 +32,7 @@ func TestProcessInclude(t *testing.T) {
 	f.Close()
 
 	assert.Equal(t, "foo", bundle.Config.Workspace.Host)
-	_, err = mutator.ProcessInclude(fullPath, relPath).Apply(context.Background(), bundle)
+	err = mutator.ProcessInclude(fullPath, relPath).Apply(context.Background(), bundle)
 	require.NoError(t, err)
 	assert.Equal(t, "bar", bundle.Config.Workspace.Host)
 }

@@ -22,7 +22,7 @@ func (m *processRootIncludes) Name() string {
 	return "ProcessRootIncludes"
 }
 
-func (m *processRootIncludes) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error {
 	var out []bundle.Mutator
 
 	// Map with files we've already seen to avoid loading them twice.
@@ -40,13 +40,13 @@ func (m *processRootIncludes) Apply(_ context.Context, b *bundle.Bundle) ([]bund
 	for _, entry := range b.Config.Include {
 		// Include paths must be relative.
 		if filepath.IsAbs(entry) {
-			return nil, fmt.Errorf("%s: includes must be relative paths", entry)
+			return fmt.Errorf("%s: includes must be relative paths", entry)
 		}
 
 		// Anchor includes to the bundle root path.
 		matches, err := filepath.Glob(filepath.Join(b.Config.Path, entry))
 		if err != nil {
-			return nil, err
+			return err
 		}
 
 		// Filter matches to ones we haven't seen yet.
@@ -54,7 +54,7 @@ func (m *processRootIncludes) Apply(_ context.Context, b *bundle.Bundle) ([]bund
 		for _, match := range matches {
 			rel, err := filepath.Rel(b.Config.Path, match)
 			if err != nil {
-				return nil, err
+				return err
 			}
 			if _, ok := seen[rel]; ok {
 				continue
@@ -74,5 +74,5 @@ func (m *processRootIncludes) Apply(_ context.Context, b *bundle.Bundle) ([]bund
 	// Swap out the original includes list with the expanded globs.
 	b.Config.Include = files
 
-	return out, nil
+	return bundle.Apply(ctx, b, bundle.Seq(out...))
 }

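With no child-mutator return value, processRootIncludes now drives the per-file ProcessInclude mutators itself through bundle.Seq. The Seq helper is not shown in this diff; a plausible sketch, assuming it simply runs its arguments in order and stops at the first error, looks like this (the real implementation in the repository may differ):

// Plausible sketch of Seq as used above.
type seqMutator struct {
	mutators []Mutator
}

func Seq(ms ...Mutator) Mutator {
	return &seqMutator{mutators: ms}
}

func (s *seqMutator) Name() string { return "seq" }

func (s *seqMutator) Apply(ctx context.Context, b *Bundle) error {
	// Run each mutator in order; the first failure aborts the sequence.
	for _, m := range s.mutators {
		if err := m.Apply(ctx, b); err != nil {
			return err
		}
	}
	return nil
}
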
@@ -26,7 +26,7 @@ func TestProcessRootIncludesEmpty(t *testing.T) {
 			Path: ".",
 		},
 	}
-	_, err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
+	err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 }
 
@@ -46,7 +46,7 @@ func TestProcessRootIncludesAbs(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
+	err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
 	require.Error(t, err)
 	assert.Contains(t, err.Error(), "must be relative paths")
 }
@@ -65,17 +65,9 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) {
 	touch(t, bundle.Config.Path, "a.yml")
 	touch(t, bundle.Config.Path, "b.yml")
 
-	ms, err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
+	err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 
-	var names []string
-	for _, m := range ms {
-		names = append(names, m.Name())
-	}
-
-	assert.NotContains(t, names, "ProcessInclude(bundle.yml)")
-	assert.Contains(t, names, "ProcessInclude(a.yml)")
-	assert.Contains(t, names, "ProcessInclude(b.yml)")
 	assert.Equal(t, []string{"a.yml", "b.yml"}, bundle.Config.Include)
 }
 
@@ -93,16 +85,9 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) {
 	touch(t, bundle.Config.Path, "a1.yml")
 	touch(t, bundle.Config.Path, "b1.yml")
 
-	ms, err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
+	err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 
-	var names []string
-	for _, m := range ms {
-		names = append(names, m.Name())
-	}
-
-	assert.Contains(t, names, "ProcessInclude(a1.yml)")
-	assert.Contains(t, names, "ProcessInclude(b1.yml)")
 	assert.Equal(t, []string{"a1.yml", "b1.yml"}, bundle.Config.Include)
 }
 
@@ -119,9 +104,7 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) {
 
 	touch(t, bundle.Config.Path, "a.yml")
 
-	ms, err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
+	err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
 	require.NoError(t, err)
-	assert.Len(t, ms, 1)
-	assert.Equal(t, "ProcessInclude(a.yml)", ms[0].Name())
 	assert.Equal(t, []string{"a.yml"}, bundle.Config.Include)
 }

@@ -20,15 +20,15 @@ func (m *selectDefaultEnvironment) Name() string {
 	return "SelectDefaultEnvironment"
 }
 
-func (m *selectDefaultEnvironment) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *selectDefaultEnvironment) Apply(ctx context.Context, b *bundle.Bundle) error {
 	if len(b.Config.Environments) == 0 {
-		return nil, fmt.Errorf("no environments defined")
+		return fmt.Errorf("no environments defined")
 	}
 
 	// One environment means there's only one default.
 	names := maps.Keys(b.Config.Environments)
 	if len(names) == 1 {
-		return []bundle.Mutator{SelectEnvironment(names[0])}, nil
+		return SelectEnvironment(names[0]).Apply(ctx, b)
 	}
 
 	// Multiple environments means we look for the `default` flag.
@@ -41,14 +41,14 @@ func (m *selectDefaultEnvironment) Apply(_ context.Context, b *bundle.Bundle) ([
 
 	// It is invalid to have multiple environments with the `default` flag set.
 	if len(defaults) > 1 {
-		return nil, fmt.Errorf("multiple environments are marked as default (%s)", strings.Join(defaults, ", "))
+		return fmt.Errorf("multiple environments are marked as default (%s)", strings.Join(defaults, ", "))
 	}
 
 	// If no environment has the `default` flag set, ask the user to specify one.
 	if len(defaults) == 0 {
-		return nil, fmt.Errorf("please specify environment")
+		return fmt.Errorf("please specify environment")
 	}
 
 	// One default remaining.
-	return []bundle.Mutator{SelectEnvironment(defaults[0])}, nil
+	return SelectEnvironment(defaults[0]).Apply(ctx, b)
 }

@@ -16,7 +16,7 @@ func TestSelectDefaultEnvironmentNoEnvironments(t *testing.T) {
 			Environments: map[string]*config.Environment{},
 		},
 	}
-	_, err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
+	err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
 	assert.ErrorContains(t, err, "no environments defined")
 }
 
@@ -28,10 +28,9 @@ func TestSelectDefaultEnvironmentSingleEnvironments(t *testing.T) {
 			},
 		},
 	}
-	ms, err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
+	err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
 	assert.NoError(t, err)
-	assert.Len(t, ms, 1)
-	assert.Equal(t, "SelectEnvironment(foo)", ms[0].Name())
+	assert.Equal(t, "foo", bundle.Config.Bundle.Environment)
 }
 
 func TestSelectDefaultEnvironmentNoDefaults(t *testing.T) {
@@ -44,7 +43,7 @@ func TestSelectDefaultEnvironmentNoDefaults(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
+	err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
 	assert.ErrorContains(t, err, "please specify environment")
 }
 
@@ -57,7 +56,7 @@ func TestSelectDefaultEnvironmentNoDefaultsWithNil(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
+	err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
 	assert.ErrorContains(t, err, "please specify environment")
 }
 
@@ -71,7 +70,7 @@ func TestSelectDefaultEnvironmentMultipleDefaults(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
+	err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
 	assert.ErrorContains(t, err, "multiple environments are marked as default")
 }
 
@@ -85,8 +84,7 @@ func TestSelectDefaultEnvironmentSingleDefault(t *testing.T) {
 			},
 		},
 	}
-	ms, err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
+	err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
 	assert.NoError(t, err)
-	assert.Len(t, ms, 1)
-	assert.Equal(t, "SelectEnvironment(bar)", ms[0].Name())
+	assert.Equal(t, "bar", bundle.Config.Bundle.Environment)
 }

@@ -22,21 +22,21 @@ func (m *selectEnvironment) Name() string {
 	return fmt.Sprintf("SelectEnvironment(%s)", m.name)
 }
 
-func (m *selectEnvironment) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *selectEnvironment) Apply(_ context.Context, b *bundle.Bundle) error {
 	if b.Config.Environments == nil {
-		return nil, fmt.Errorf("no environments defined")
+		return fmt.Errorf("no environments defined")
 	}
 
 	// Get specified environment
 	env, ok := b.Config.Environments[m.name]
 	if !ok {
-		return nil, fmt.Errorf("%s: no such environment", m.name)
+		return fmt.Errorf("%s: no such environment", m.name)
 	}
 
 	// Merge specified environment into root configuration structure.
 	err := b.Config.MergeEnvironment(env)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	// Store specified environment in configuration for reference.
@@ -44,5 +44,5 @@ func (m *selectEnvironment) Apply(_ context.Context, b *bundle.Bundle) ([]bundle
 
 	// Clear environments after loading.
 	b.Config.Environments = nil
-	return nil, nil
+	return nil
 }

@@ -26,7 +26,7 @@ func TestSelectEnvironment(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.SelectEnvironment("default").Apply(context.Background(), bundle)
+	err := mutator.SelectEnvironment("default").Apply(context.Background(), bundle)
 	require.NoError(t, err)
 	assert.Equal(t, "bar", bundle.Config.Workspace.Host)
 }
@@ -39,6 +39,6 @@ func TestSelectEnvironmentNotFound(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.SelectEnvironment("doesnt-exist").Apply(context.Background(), bundle)
+	err := mutator.SelectEnvironment("doesnt-exist").Apply(context.Background(), bundle)
 	require.Error(t, err, "no environments defined")
 }

@@ -52,12 +52,12 @@ func setVariable(v *variable.Variable, name string) error {
 	return fmt.Errorf(`no value assigned to required variable %s. Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name)
 }
 
-func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) error {
 	for name, variable := range b.Config.Variables {
 		err := setVariable(variable, name)
 		if err != nil {
-			return nil, err
+			return err
 		}
 	}
-	return nil, nil
+	return nil
 }

@@ -108,7 +108,7 @@ func TestSetVariablesMutator(t *testing.T) {
 
 	t.Setenv("BUNDLE_VAR_b", "env-var-b")
 
-	_, err := SetVariables().Apply(context.Background(), bundle)
+	err := SetVariables().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 	assert.Equal(t, "default-a", *bundle.Config.Variables["a"].Value)
 	assert.Equal(t, "env-var-b", *bundle.Config.Variables["b"].Value)

@@ -145,19 +145,24 @@ func (m *translatePaths) translatePipelineLibrary(dir string, b *bundle.Bundle,
 	return nil
 }
 
-func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) error {
 	m.seen = make(map[string]string)
 
 	for key, job := range b.Config.Resources.Jobs {
 		dir, err := job.ConfigFileDirectory()
 		if err != nil {
-			return nil, fmt.Errorf("unable to determine directory for job %s: %w", key, err)
+			return fmt.Errorf("unable to determine directory for job %s: %w", key, err)
 		}
 
+		// Do not translate job task paths if using git source
+		if job.GitSource != nil {
+			continue
+		}
+
 		for i := 0; i < len(job.Tasks); i++ {
 			err := m.translateJobTask(dir, b, &job.Tasks[i])
 			if err != nil {
-				return nil, err
+				return err
 			}
 		}
 	}
@@ -165,16 +170,16 @@ func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mu
 	for key, pipeline := range b.Config.Resources.Pipelines {
 		dir, err := pipeline.ConfigFileDirectory()
 		if err != nil {
-			return nil, fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err)
+			return fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err)
 		}
 
 		for i := 0; i < len(pipeline.Libraries); i++ {
 			err := m.translatePipelineLibrary(dir, b, &pipeline.Libraries[i])
 			if err != nil {
-				return nil, err
+				return err
 			}
 		}
 	}
 
-	return nil, nil
+	return nil
 }

@@ -31,6 +31,73 @@ func touchEmptyFile(t *testing.T, path string) {
 	f.Close()
 }
 
+func TestTranslatePathsSkippedWithGitSource(t *testing.T) {
+	dir := t.TempDir()
+	bundle := &bundle.Bundle{
+		Config: config.Root{
+			Path: dir,
+			Workspace: config.Workspace{
+				FilesPath: "/bundle",
+			},
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job": {
+						Paths: resources.Paths{
+							ConfigFilePath: filepath.Join(dir, "resource.yml"),
+						},
+						JobSettings: &jobs.JobSettings{
+							GitSource: &jobs.GitSource{
+								GitBranch:   "somebranch",
+								GitCommit:   "somecommit",
+								GitProvider: "github",
+								GitTag:      "sometag",
+								GitUrl:      "https://github.com/someuser/somerepo",
+							},
+							Tasks: []jobs.JobTaskSettings{
+								{
+									NotebookTask: &jobs.NotebookTask{
+										NotebookPath: "my_job_notebook.py",
+									},
+								},
+								{
+									PythonWheelTask: &jobs.PythonWheelTask{
+										PackageName: "foo",
+									},
+								},
+								{
+									SparkPythonTask: &jobs.SparkPythonTask{
+										PythonFile: "my_python_file.py",
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
+	require.NoError(t, err)
+
+	assert.Equal(
+		t,
+		"my_job_notebook.py",
+		bundle.Config.Resources.Jobs["job"].Tasks[0].NotebookTask.NotebookPath,
+	)
+	assert.Equal(
+		t,
+		"foo",
+		bundle.Config.Resources.Jobs["job"].Tasks[1].PythonWheelTask.PackageName,
+	)
+	assert.Equal(
+		t,
+		"my_python_file.py",
+		bundle.Config.Resources.Jobs["job"].Tasks[2].SparkPythonTask.PythonFile,
+	)
+}
+
 func TestTranslatePaths(t *testing.T) {
 	dir := t.TempDir()
 	touchNotebookFile(t, filepath.Join(dir, "my_job_notebook.py"))
@@ -118,7 +185,7 @@ func TestTranslatePaths(t *testing.T) {
 		},
 	}
 
-	_, err := mutator.TranslatePaths().Apply(context.Background(), bundle)
+	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 
 	// Assert that the path in the tasks now refer to the artifact.
@@ -215,7 +282,7 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
 		},
 	}
 
-	_, err := mutator.TranslatePaths().Apply(context.Background(), bundle)
+	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 
 	assert.Equal(
@@ -261,7 +328,7 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) {
 		},
 	}
 
-	_, err := mutator.TranslatePaths().Apply(context.Background(), bundle)
+	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
 	assert.ErrorContains(t, err, "is not contained in bundle root")
 }
 
@@ -292,7 +359,7 @@ func TestJobNotebookDoesNotExistError(t *testing.T) {
 		},
 	}
 
-	_, err := mutator.TranslatePaths().Apply(context.Background(), bundle)
+	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
 	assert.EqualError(t, err, "notebook ./doesnt_exist.py not found")
 }
 
@@ -323,7 +390,7 @@ func TestJobFileDoesNotExistError(t *testing.T) {
 		},
 	}
 
-	_, err := mutator.TranslatePaths().Apply(context.Background(), bundle)
+	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
 	assert.EqualError(t, err, "file ./doesnt_exist.py not found")
 }
 
@@ -354,7 +421,7 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) {
 		},
 	}
 
-	_, err := mutator.TranslatePaths().Apply(context.Background(), bundle)
+	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
 	assert.EqualError(t, err, "notebook ./doesnt_exist.py not found")
 }
 
@@ -385,6 +452,6 @@ func TestPipelineFileDoesNotExistError(t *testing.T) {
 		},
 	}
 
-	_, err := mutator.TranslatePaths().Apply(context.Background(), bundle)
+	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
 	assert.EqualError(t, err, "file ./doesnt_exist.py not found")
 }

@@ -78,6 +78,9 @@ func (r *Root) SetConfigFilePath(path string) {
 	r.Resources.SetConfigFilePath(path)
 	if r.Environments != nil {
 		for _, env := range r.Environments {
+			if env == nil {
+				continue
+			}
 			if env.Resources != nil {
 				env.Resources.SetConfigFilePath(path)
 			}

@@ -97,9 +97,9 @@ func (w *Workspace) Client() (*databricks.WorkspaceClient, error) {
 func init() {
 	arg0 := os.Args[0]
 
-	// Configure BRICKS_CLI_PATH only if our caller intends to use this specific version of this binary.
+	// Configure DATABRICKS_CLI_PATH only if our caller intends to use this specific version of this binary.
 	// Otherwise, if it is equal to its basename, processes can find it in $PATH.
 	if arg0 != filepath.Base(arg0) {
-		os.Setenv("BRICKS_CLI_PATH", arg0)
+		os.Setenv("DATABRICKS_CLI_PATH", arg0)
 	}
 }

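The rename from BRICKS_CLI_PATH to DATABRICKS_CLI_PATH matters to child processes that re-invoke the CLI. How a consumer resolves the binary is not shown in this diff; a hedged sketch of the lookup a child process might perform:

// Hypothetical consumer-side lookup; not part of this commit.
cliPath := os.Getenv("DATABRICKS_CLI_PATH")
if cliPath == "" {
	// Fall back to resolving the binary by name on $PATH.
	cliPath = "databricks"
}
cmd := exec.Command(cliPath, "bundle", "deploy")
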
@@ -7,29 +7,27 @@ import (
 )
 
 type DeferredMutator struct {
-	mutators []Mutator
-	finally  []Mutator
+	mutator Mutator
+	finally Mutator
 }
 
 func (d *DeferredMutator) Name() string {
 	return "deferred"
 }
 
-func Defer(mutators []Mutator, finally []Mutator) []Mutator {
-	return []Mutator{
-		&DeferredMutator{
-			mutators: mutators,
-			finally:  finally,
-		},
+func Defer(mutator Mutator, finally Mutator) Mutator {
+	return &DeferredMutator{
+		mutator: mutator,
+		finally: finally,
 	}
 }
 
-func (d *DeferredMutator) Apply(ctx context.Context, b *Bundle) ([]Mutator, error) {
-	mainErr := Apply(ctx, b, d.mutators)
+func (d *DeferredMutator) Apply(ctx context.Context, b *Bundle) error {
+	mainErr := Apply(ctx, b, d.mutator)
 	errOnFinish := Apply(ctx, b, d.finally)
 	if mainErr != nil || errOnFinish != nil {
-		return nil, errs.FromMany(mainErr, errOnFinish)
+		return errs.FromMany(mainErr, errOnFinish)
 	}
 
-	return nil, nil
+	return nil
 }

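After this change, deferred cleanup composes like a try/finally over plain mutators. A usage sketch, with placeholder mutator names that are not taken from this diff:

// phaseBuild, phaseDeploy, and releaseLock are hypothetical mutators.
deploy := Defer(Seq(phaseBuild, phaseDeploy), releaseLock)

// releaseLock runs whether or not the main sequence failed; errors from
// both sides are combined via errs.FromMany, as shown above.
err := Apply(ctx, b, deploy)
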
@@ -17,9 +17,9 @@ func (t *mutatorWithError) Name() string {
 	return "mutatorWithError"
 }
 
-func (t *mutatorWithError) Apply(_ context.Context, b *Bundle) ([]Mutator, error) {
+func (t *mutatorWithError) Apply(_ context.Context, b *Bundle) error {
 	t.applyCalled++
-	return nil, fmt.Errorf(t.errorMsg)
+	return fmt.Errorf(t.errorMsg)
 }
 
 func TestDeferredMutatorWhenAllMutatorsSucceed(t *testing.T) {
@@ -27,7 +27,7 @@ func TestDeferredMutatorWhenAllMutatorsSucceed(t *testing.T) {
 	m2 := &testMutator{}
 	m3 := &testMutator{}
 	cleanup := &testMutator{}
-	deferredMutator := Defer([]Mutator{m1, m2, m3}, []Mutator{cleanup})
+	deferredMutator := Defer(Seq(m1, m2, m3), cleanup)
 
 	bundle := &Bundle{}
 	err := Apply(context.Background(), bundle, deferredMutator)
@@ -44,7 +44,7 @@ func TestDeferredMutatorWhenFirstFails(t *testing.T) {
 	m2 := &testMutator{}
 	mErr := &mutatorWithError{errorMsg: "mutator error occurred"}
 	cleanup := &testMutator{}
-	deferredMutator := Defer([]Mutator{mErr, m1, m2}, []Mutator{cleanup})
+	deferredMutator := Defer(Seq(mErr, m1, m2), cleanup)
 
 	bundle := &Bundle{}
 	err := Apply(context.Background(), bundle, deferredMutator)
@@ -61,7 +61,7 @@ func TestDeferredMutatorWhenMiddleOneFails(t *testing.T) {
 	m2 := &testMutator{}
 	mErr := &mutatorWithError{errorMsg: "mutator error occurred"}
 	cleanup := &testMutator{}
-	deferredMutator := Defer([]Mutator{m1, mErr, m2}, []Mutator{cleanup})
+	deferredMutator := Defer(Seq(m1, mErr, m2), cleanup)
 
 	bundle := &Bundle{}
 	err := Apply(context.Background(), bundle, deferredMutator)
@@ -78,7 +78,7 @@ func TestDeferredMutatorWhenLastOneFails(t *testing.T) {
 	m2 := &testMutator{}
 	mErr := &mutatorWithError{errorMsg: "mutator error occurred"}
 	cleanup := &testMutator{}
-	deferredMutator := Defer([]Mutator{m1, m2, mErr}, []Mutator{cleanup})
+	deferredMutator := Defer(Seq(m1, m2, mErr), cleanup)
 
 	bundle := &Bundle{}
 	err := Apply(context.Background(), bundle, deferredMutator)
@@ -95,7 +95,7 @@ func TestDeferredMutatorCombinesErrorMessages(t *testing.T) {
 	m2 := &testMutator{}
 	mErr := &mutatorWithError{errorMsg: "mutator error occurred"}
 	cleanupErr := &mutatorWithError{errorMsg: "cleanup error occurred"}
-	deferredMutator := Defer([]Mutator{m1, m2, mErr}, []Mutator{cleanupErr})
+	deferredMutator := Defer(Seq(m1, m2, mErr), cleanupErr)
 
 	bundle := &Bundle{}
 	err := Apply(context.Background(), bundle, deferredMutator)

@@ -16,10 +16,10 @@ func (m *delete) Name() string {
 	return "files.Delete"
 }
 
-func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) error {
 	// Do not delete files if terraform destroy was not consented
 	if !b.Plan.IsEmpty && !b.Plan.ConfirmApply {
-		return nil, nil
+		return nil
 	}
 
 	cmdio.LogString(ctx, "Starting deletion of remote bundle files")
@@ -29,10 +29,10 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator,
 	if !b.AutoApprove {
 		proceed, err := cmdio.Ask(ctx, fmt.Sprintf("\n%s and all files in it will be %s Proceed?: ", b.Config.Workspace.RootPath, red("deleted permanently!")))
 		if err != nil {
-			return nil, err
+			return err
 		}
 		if !proceed {
-			return nil, nil
+			return nil
 		}
 	}
 
@@ -41,22 +41,22 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator,
 		Recursive: true,
 	})
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	// Clean up sync snapshot file
 	sync, err := getSync(ctx, b)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	err = sync.DestroySnapshot(ctx)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	cmdio.LogString(ctx, fmt.Sprintf("Deleted snapshot file at %s", sync.SnapshotPath()))
 	cmdio.LogString(ctx, "Successfully deleted files!")
-	return nil, nil
+	return nil
 }
 
 func Delete() bundle.Mutator {

@@ -14,20 +14,20 @@ func (m *upload) Name() string {
 	return "files.Upload"
 }
 
-func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error {
 	cmdio.LogString(ctx, "Starting upload of bundle files")
 	sync, err := getSync(ctx, b)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	err = sync.RunOnce(ctx)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	cmdio.LogString(ctx, fmt.Sprintf("Uploaded bundle files at %s!\n", b.Config.Workspace.FilesPath))
-	return nil, nil
+	return nil
 }
 
 func Upload() bundle.Mutator {

@@ -30,16 +30,16 @@ func (m *acquire) init(b *bundle.Bundle) error {
 	return nil
 }
 
-func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) error {
 	// Return early if locking is disabled.
 	if !b.Config.Bundle.Lock.IsEnabled() {
 		log.Infof(ctx, "Skipping; locking is disabled")
-		return nil, nil
+		return nil
 	}
 
 	err := m.init(b)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	force := b.Config.Bundle.Lock.Force
@@ -47,8 +47,8 @@ func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator
 	err = b.Locker.Lock(ctx, force)
 	if err != nil {
 		log.Errorf(ctx, "Failed to acquire deployment lock: %v", err)
-		return nil, err
+		return err
 	}
 
-	return nil, nil
+	return nil
 }

@@ -2,41 +2,53 @@ package lock
 
 import (
 	"context"
+	"fmt"
 
 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/locker"
 	"github.com/databricks/cli/libs/log"
 )
 
-type release struct{}
+type Goal string
 
-func Release() bundle.Mutator {
-	return &release{}
+const (
+	GoalDeploy  = Goal("deploy")
+	GoalDestroy = Goal("destroy")
+)
+
+type release struct {
+	goal Goal
+}
+
+func Release(goal Goal) bundle.Mutator {
+	return &release{goal}
 }
 
 func (m *release) Name() string {
 	return "lock:release"
 }
 
-func (m *release) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *release) Apply(ctx context.Context, b *bundle.Bundle) error {
 	// Return early if locking is disabled.
 	if !b.Config.Bundle.Lock.IsEnabled() {
 		log.Infof(ctx, "Skipping; locking is disabled")
-		return nil, nil
+		return nil
 	}
 
 	// Return early if the locker is not set.
 	// It is likely an error occurred prior to initialization of the locker instance.
 	if b.Locker == nil {
 		log.Warnf(ctx, "Unable to release lock if locker is not configured")
-		return nil, nil
+		return nil
 	}
 
 	log.Infof(ctx, "Releasing deployment lock")
-	err := b.Locker.Unlock(ctx)
-	if err != nil {
-		log.Errorf(ctx, "Failed to release deployment lock: %v", err)
-		return nil, err
+	switch m.goal {
+	case GoalDeploy:
+		return b.Locker.Unlock(ctx)
+	case GoalDestroy:
+		return b.Locker.Unlock(ctx, locker.AllowLockFileNotExist)
+	default:
+		return fmt.Errorf("unknown goal for lock release: %s", m.goal)
 	}
-
-	return nil, nil
 }

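Callers must now say which operation the release belongs to, so a destroy, which may remove the lock file together with the workspace root, tolerates a missing lock file. A wiring sketch; the phase composition below is illustrative rather than taken from this diff:

// deployCore and destroyCore stand in for the real phase sequences.
deployPhase := bundle.Defer(deployCore, lock.Release(lock.GoalDeploy))
destroyPhase := bundle.Defer(destroyCore, lock.Release(lock.GoalDestroy))
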
@@ -15,26 +15,26 @@ func (w *apply) Name() string {
 	return "terraform.Apply"
 }
 
-func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) error {
 	tf := b.Terraform
 	if tf == nil {
-		return nil, fmt.Errorf("terraform not initialized")
+		return fmt.Errorf("terraform not initialized")
 	}
 
 	cmdio.LogString(ctx, "Starting resource deployment")
 
 	err := tf.Init(ctx, tfexec.Upgrade(true))
 	if err != nil {
-		return nil, fmt.Errorf("terraform init: %w", err)
+		return fmt.Errorf("terraform init: %w", err)
 	}
 
 	err = tf.Apply(ctx)
 	if err != nil {
-		return nil, fmt.Errorf("terraform apply: %w", err)
+		return fmt.Errorf("terraform apply: %w", err)
 	}
 
 	cmdio.LogString(ctx, "Resource deployment completed!")
-	return nil, nil
+	return nil
 }
 
 // Apply returns a [bundle.Mutator] that runs the equivalent of `terraform apply`

@@ -62,28 +62,28 @@ func (w *destroy) Name() string {
 	return "terraform.Destroy"
 }
 
-func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error {
 	// return early if plan is empty
 	if b.Plan.IsEmpty {
 		cmdio.LogString(ctx, "No resources to destroy in plan. Skipping destroy!")
-		return nil, nil
+		return nil
 	}
 
 	tf := b.Terraform
 	if tf == nil {
-		return nil, fmt.Errorf("terraform not initialized")
+		return fmt.Errorf("terraform not initialized")
 	}
 
 	// read plan file
 	plan, err := tf.ShowPlanFile(ctx, b.Plan.Path)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	// print the resources that will be destroyed
 	err = logDestroyPlan(ctx, plan.ResourceChanges)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	// Ask for confirmation, if needed
@@ -91,17 +91,17 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator
 		red := color.New(color.FgRed).SprintFunc()
 		b.Plan.ConfirmApply, err = cmdio.Ask(ctx, fmt.Sprintf("\nThis will permanently %s resources! Proceed? [y/n]: ", red("destroy")))
 		if err != nil {
-			return nil, err
+			return err
 		}
 	}
 
 	// return if confirmation was not provided
 	if !b.Plan.ConfirmApply {
-		return nil, nil
+		return nil
 	}
 
 	if b.Plan.Path == "" {
-		return nil, fmt.Errorf("no plan found")
+		return fmt.Errorf("no plan found")
 	}
 
 	cmdio.LogString(ctx, "Starting to destroy resources")
@@ -109,11 +109,11 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator
 	// Apply terraform according to the computed destroy plan
 	err = tf.Apply(ctx, tfexec.DirOrPlan(b.Plan.Path))
 	if err != nil {
-		return nil, fmt.Errorf("terraform destroy: %w", err)
+		return fmt.Errorf("terraform destroy: %w", err)
 	}
 
 	cmdio.LogString(ctx, "Successfully destroyed resources!")
-	return nil, nil
+	return nil
 }
 
 // Destroy returns a [bundle.Mutator] that runs the conceptual equivalent of

@ -6,6 +6,7 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle"
|
"github.com/databricks/cli/bundle"
|
||||||
|
@ -69,7 +70,55 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con
|
||||||
return tf.ExecPath, nil
|
return tf.ExecPath, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
|
// This function sets temp dir location for terraform to use. If user does not
|
||||||
|
// specify anything here, we fall back to a `tmp` directory in the bundle's cache
|
||||||
|
// directory
|
||||||
|
//
|
||||||
|
// This is necessary to avoid trying to create temporary files in directories
|
||||||
|
// the CLI and its dependencies do not have access to.
|
||||||
|
//
|
||||||
|
// see: os.TempDir for more context
|
||||||
|
func setTempDirEnvVars(env map[string]string, b *bundle.Bundle) error {
|
||||||
|
switch runtime.GOOS {
|
||||||
|
case "windows":
|
||||||
|
if v, ok := os.LookupEnv("TMP"); ok {
|
||||||
|
env["TMP"] = v
|
||||||
|
} else if v, ok := os.LookupEnv("TEMP"); ok {
|
||||||
|
env["TEMP"] = v
|
||||||
|
} else if v, ok := os.LookupEnv("USERPROFILE"); ok {
|
||||||
|
env["USERPROFILE"] = v
|
||||||
|
} else {
|
||||||
|
tmpDir, err := b.CacheDir("tmp")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
env["TMP"] = tmpDir
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
// If TMPDIR is not set, we let the process fall back to its default value.
|
||||||
|
if v, ok := os.LookupEnv("TMPDIR"); ok {
|
||||||
|
env["TMPDIR"] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// This function passes through all proxy related environment variables.
|
||||||
|
func setProxyEnvVars(env map[string]string, b *bundle.Bundle) error {
|
||||||
|
for _, v := range []string{"http_proxy", "https_proxy", "no_proxy"} {
|
||||||
|
// The case (upper or lower) is notoriously inconsistent for tools on Unix systems.
|
||||||
|
// We therefore try to read both the upper and lower case versions of the variable.
|
||||||
|
for _, v := range []string{strings.ToUpper(v), strings.ToLower(v)} {
|
||||||
|
if val, ok := os.LookupEnv(v); ok {
|
||||||
|
// Only set uppercase version of the variable.
|
||||||
|
env[strings.ToUpper(v)] = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) error {
|
||||||
tfConfig := b.Config.Bundle.Terraform
|
tfConfig := b.Config.Bundle.Terraform
|
||||||
if tfConfig == nil {
|
if tfConfig == nil {
|
||||||
tfConfig = &config.Terraform{}
|
tfConfig = &config.Terraform{}
|
||||||
|
@ -78,22 +127,22 @@ func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Muta
|
||||||
|
|
||||||
execPath, err := m.findExecPath(ctx, b, tfConfig)
|
execPath, err := m.findExecPath(ctx, b, tfConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
workingDir, err := Dir(b)
|
workingDir, err := Dir(b)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
tf, err := tfexec.NewTerraform(workingDir, execPath)
|
tf, err := tfexec.NewTerraform(workingDir, execPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
env, err := b.AuthEnv()
|
env, err := b.AuthEnv()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Include $HOME in set of environment variables to pass along.
|
// Include $HOME in set of environment variables to pass along.
|
||||||
|
@@ -102,15 +151,27 @@ func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
         env["HOME"] = home
     }

+    // Set the temporary directory environment variables.
+    err = setTempDirEnvVars(env, b)
+    if err != nil {
+        return err
+    }
+
+    // Set the proxy-related environment variables.
+    err = setProxyEnvVars(env, b)
+    if err != nil {
+        return err
+    }
+
     // Configure environment variables for auth for Terraform to use.
     log.Debugf(ctx, "Environment variables for Terraform: %s", strings.Join(maps.Keys(env), ", "))
     err = tf.SetEnv(env)
     if err != nil {
-        return nil, err
+        return err
     }

     b.Terraform = tf
-    return nil, nil
+    return nil
 }

 func Initialize() bundle.Mutator {
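Across these hunks the mutator contract changes: Apply no longer returns a slice of follow-up mutators, only an error, and composition moves to an explicit sequencing helper (bundle.Seq, as the updated tests below show). A minimal sketch of what such a contract and a trivial mutator look like; the Bundle stand-in and the noop type are hypothetical, not the CLI's actual types:

package main

import (
    "context"
    "fmt"
)

// Bundle is a hypothetical stand-in for bundle.Bundle.
type Bundle struct{}

// Mutator mirrors the post-change contract: Apply returns only an error.
type Mutator interface {
    Name() string
    Apply(ctx context.Context, b *Bundle) error
}

type noop struct{}

func (n *noop) Name() string                               { return "noop" }
func (n *noop) Apply(ctx context.Context, b *Bundle) error { return nil }

// seq applies mutators in order, stopping at the first error — the role
// a sequencing helper plays once Apply stops returning follow-up mutators.
func seq(ctx context.Context, b *Bundle, ms ...Mutator) error {
    for _, m := range ms {
        if err := m.Apply(ctx, b); err != nil {
            return fmt.Errorf("%s: %w", m.Name(), err)
        }
    }
    return nil
}

func main() {
    if err := seq(context.Background(), &Bundle{}, &noop{}); err != nil {
        fmt.Println(err)
    }
}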
@@ -2,14 +2,25 @@ package terraform

 import (
     "context"
+    "os"
     "os/exec"
+    "runtime"
+    "strings"
     "testing"

     "github.com/databricks/cli/bundle"
     "github.com/databricks/cli/bundle/config"
+    "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
+    "golang.org/x/exp/maps"
 )

+func unsetEnv(t *testing.T, name string) {
+    t.Setenv(name, "")
+    err := os.Unsetenv(name)
+    require.NoError(t, err)
+}
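A note on the helper above: testing.T.Setenv registers a cleanup that restores the variable's original value when the test ends, so calling it with an empty value before os.Unsetenv yields "unset for the duration of the test, restored afterwards". A minimal sketch of the same pattern in isolation:

package example

import (
    "os"
    "testing"
)

// unsetForTest removes name from the environment for the duration of a test.
// t.Setenv registers restore-on-cleanup; os.Unsetenv then removes the value.
func unsetForTest(t *testing.T, name string) {
    t.Setenv(name, "")
    if err := os.Unsetenv(name); err != nil {
        t.Fatal(err)
    }
}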
 func TestInitEnvironmentVariables(t *testing.T) {
     _, err := exec.LookPath("terraform")
     if err != nil {

@@ -34,6 +45,230 @@ func TestInitEnvironmentVariables(t *testing.T) {
     t.Setenv("DATABRICKS_TOKEN", "foobar")
     bundle.WorkspaceClient()

-    _, err = Initialize().Apply(context.Background(), bundle)
+    err = Initialize().Apply(context.Background(), bundle)
     require.NoError(t, err)
 }
+
+func TestSetTempDirEnvVarsForUnixWithTmpDirSet(t *testing.T) {
+    if runtime.GOOS != "darwin" && runtime.GOOS != "linux" {
+        t.SkipNow()
+    }
+
+    b := &bundle.Bundle{
+        Config: config.Root{
+            Path: t.TempDir(),
+            Bundle: config.Bundle{
+                Environment: "whatever",
+            },
+        },
+    }
+
+    // Set the TMPDIR environment variable.
+    t.Setenv("TMPDIR", "/foo/bar")
+
+    // Compute the environment.
+    env := make(map[string]string, 0)
+    err := setTempDirEnvVars(env, b)
+    require.NoError(t, err)
+
+    // Assert that we pass through TMPDIR.
+    assert.Equal(t, map[string]string{
+        "TMPDIR": "/foo/bar",
+    }, env)
+}
+
+func TestSetTempDirEnvVarsForUnixWithTmpDirNotSet(t *testing.T) {
+    if runtime.GOOS != "darwin" && runtime.GOOS != "linux" {
+        t.SkipNow()
+    }
+
+    b := &bundle.Bundle{
+        Config: config.Root{
+            Path: t.TempDir(),
+            Bundle: config.Bundle{
+                Environment: "whatever",
+            },
+        },
+    }
+
+    // Unset the TMPDIR environment variable to confirm it's not set.
+    unsetEnv(t, "TMPDIR")
+
+    // Compute the environment.
+    env := make(map[string]string, 0)
+    err := setTempDirEnvVars(env, b)
+    require.NoError(t, err)
+
+    // Assert that we don't pass through TMPDIR.
+    assert.Equal(t, map[string]string{}, env)
+}
+
+func TestSetTempDirEnvVarsForWindowWithAllTmpDirEnvVarsSet(t *testing.T) {
+    if runtime.GOOS != "windows" {
+        t.SkipNow()
+    }
+
+    b := &bundle.Bundle{
+        Config: config.Root{
+            Path: t.TempDir(),
+            Bundle: config.Bundle{
+                Environment: "whatever",
+            },
+        },
+    }
+
+    // Set environment variables.
+    t.Setenv("TMP", "c:\\foo\\a")
+    t.Setenv("TEMP", "c:\\foo\\b")
+    t.Setenv("USERPROFILE", "c:\\foo\\c")
+
+    // Compute the environment.
+    env := make(map[string]string, 0)
+    err := setTempDirEnvVars(env, b)
+    require.NoError(t, err)
+
+    // Assert that we pass through the highest-priority env var value.
+    assert.Equal(t, map[string]string{
+        "TMP": "c:\\foo\\a",
+    }, env)
+}
+
+func TestSetTempDirEnvVarsForWindowWithUserProfileAndTempSet(t *testing.T) {
+    if runtime.GOOS != "windows" {
+        t.SkipNow()
+    }
+
+    b := &bundle.Bundle{
+        Config: config.Root{
+            Path: t.TempDir(),
+            Bundle: config.Bundle{
+                Environment: "whatever",
+            },
+        },
+    }
+
+    // Set environment variables.
+    unsetEnv(t, "TMP")
+    t.Setenv("TEMP", "c:\\foo\\b")
+    t.Setenv("USERPROFILE", "c:\\foo\\c")
+
+    // Compute the environment.
+    env := make(map[string]string, 0)
+    err := setTempDirEnvVars(env, b)
+    require.NoError(t, err)
+
+    // Assert that we pass through the highest-priority env var value.
+    assert.Equal(t, map[string]string{
+        "TEMP": "c:\\foo\\b",
+    }, env)
+}
+
+func TestSetTempDirEnvVarsForWindowWithUserProfileSet(t *testing.T) {
+    if runtime.GOOS != "windows" {
+        t.SkipNow()
+    }
+
+    b := &bundle.Bundle{
+        Config: config.Root{
+            Path: t.TempDir(),
+            Bundle: config.Bundle{
+                Environment: "whatever",
+            },
+        },
+    }
+
+    // Set environment variables.
+    unsetEnv(t, "TMP")
+    unsetEnv(t, "TEMP")
+    t.Setenv("USERPROFILE", "c:\\foo\\c")
+
+    // Compute the environment.
+    env := make(map[string]string, 0)
+    err := setTempDirEnvVars(env, b)
+    require.NoError(t, err)
+
+    // Assert that we pass through the user profile.
+    assert.Equal(t, map[string]string{
+        "USERPROFILE": "c:\\foo\\c",
+    }, env)
+}
+
+func TestSetTempDirEnvVarsForWindowsWithoutAnyTempDirEnvVarsSet(t *testing.T) {
+    if runtime.GOOS != "windows" {
+        t.SkipNow()
+    }
+
+    b := &bundle.Bundle{
+        Config: config.Root{
+            Path: t.TempDir(),
+            Bundle: config.Bundle{
+                Environment: "whatever",
+            },
+        },
+    }
+
+    // Unset all env vars.
+    unsetEnv(t, "TMP")
+    unsetEnv(t, "TEMP")
+    unsetEnv(t, "USERPROFILE")
+
+    // Compute the environment.
+    env := make(map[string]string, 0)
+    err := setTempDirEnvVars(env, b)
+    require.NoError(t, err)
+
+    // Assert that TMP is set to b.CacheDir("tmp").
+    tmpDir, err := b.CacheDir("tmp")
+    require.NoError(t, err)
+    assert.Equal(t, map[string]string{
+        "TMP": tmpDir,
+    }, env)
+}
+
+func TestSetProxyEnvVars(t *testing.T) {
+    b := &bundle.Bundle{
+        Config: config.Root{
+            Path: t.TempDir(),
+            Bundle: config.Bundle{
+                Environment: "whatever",
+            },
+        },
+    }
+
+    // Temporarily clear environment variables.
+    clearEnv := func() {
+        for _, v := range []string{"http_proxy", "https_proxy", "no_proxy"} {
+            for _, v := range []string{strings.ToUpper(v), strings.ToLower(v)} {
+                t.Setenv(v, "foo")
+                os.Unsetenv(v)
+            }
+        }
+    }
+
+    // No proxy env vars set.
+    clearEnv()
+    env := make(map[string]string, 0)
+    err := setProxyEnvVars(env, b)
+    require.NoError(t, err)
+    assert.Len(t, env, 0)
+
+    // Lower case set.
+    clearEnv()
+    t.Setenv("http_proxy", "foo")
+    t.Setenv("https_proxy", "foo")
+    t.Setenv("no_proxy", "foo")
+    env = make(map[string]string, 0)
+    err = setProxyEnvVars(env, b)
+    require.NoError(t, err)
+    assert.ElementsMatch(t, []string{"HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"}, maps.Keys(env))
+
+    // Upper case set.
+    clearEnv()
+    t.Setenv("HTTP_PROXY", "foo")
+    t.Setenv("HTTPS_PROXY", "foo")
+    t.Setenv("NO_PROXY", "foo")
+    env = make(map[string]string, 0)
+    err = setProxyEnvVars(env, b)
+    require.NoError(t, err)
+    assert.ElementsMatch(t, []string{"HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"}, maps.Keys(env))
+}
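The case normalization exercised by TestSetProxyEnvVars can be illustrated in isolation; a minimal sketch of the pattern (not the bundle code itself):

package main

import (
    "fmt"
    "os"
    "strings"
)

// passThroughProxy copies http_proxy/https_proxy/no_proxy — in either case —
// into env under their uppercase names, mirroring the behavior under test.
func passThroughProxy(env map[string]string) {
    for _, name := range []string{"http_proxy", "https_proxy", "no_proxy"} {
        for _, v := range []string{strings.ToUpper(name), strings.ToLower(name)} {
            if val, ok := os.LookupEnv(v); ok {
                env[strings.ToUpper(v)] = val
            }
        }
    }
}

func main() {
    os.Setenv("http_proxy", "http://127.0.0.1:3128")
    env := map[string]string{}
    passThroughProxy(env)
    fmt.Println(env) // map[HTTP_PROXY:http://127.0.0.1:3128]
}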
@@ -15,34 +15,34 @@ func (l *load) Name() string {
     return "terraform.Load"
 }

-func (l *load) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (l *load) Apply(ctx context.Context, b *bundle.Bundle) error {
     tf := b.Terraform
     if tf == nil {
-        return nil, fmt.Errorf("terraform not initialized")
+        return fmt.Errorf("terraform not initialized")
     }

     err := tf.Init(ctx, tfexec.Upgrade(true))
     if err != nil {
-        return nil, fmt.Errorf("terraform init: %w", err)
+        return fmt.Errorf("terraform init: %w", err)
     }

     state, err := b.Terraform.Show(ctx)
     if err != nil {
-        return nil, err
+        return err
     }

     err = ValidateState(state)
     if err != nil {
-        return nil, err
+        return err
     }

     // Merge state into configuration.
     err = TerraformToBundle(state, &b.Config)
     if err != nil {
-        return nil, err
+        return err
     }

-    return nil, nil
+    return nil
 }

 func ValidateState(state *tfjson.State) error {
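terraform-exec's Show returns a parsed *tfjson.State, which is what the load mutator validates and merges back into the bundle configuration. A minimal sketch of inspecting that state the same way, with placeholder paths:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/hashicorp/terraform-exec/tfexec"
)

func main() {
    ctx := context.Background()
    // Placeholder paths; in the CLI these come from the bundle cache directory.
    tf, err := tfexec.NewTerraform("/tmp/bundle-tf", "/usr/local/bin/terraform")
    if err != nil {
        log.Fatal(err)
    }
    state, err := tf.Show(ctx)
    if err != nil {
        log.Fatal(err)
    }
    // state.Values is nil when no state exists yet (e.g. before the first deploy),
    // which is the situation the validation step reports to the user.
    if state.Values == nil {
        fmt.Println("no state; did you forget to deploy?")
        return
    }
    fmt.Printf("terraform version in state: %s\n", state.TerraformVersion)
}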
@@ -32,10 +32,10 @@ func TestLoadWithNoState(t *testing.T) {
     t.Setenv("DATABRICKS_TOKEN", "foobar")
     b.WorkspaceClient()

-    err = bundle.Apply(context.Background(), b, []bundle.Mutator{
+    err = bundle.Apply(context.Background(), b, bundle.Seq(
         Initialize(),
         Load(),
-    })
+    ))

     require.ErrorContains(t, err, "Did you forget to run 'databricks bundle deploy'")
 }
@@ -26,30 +26,30 @@ func (p *plan) Name() string {
     return "terraform.Plan"
 }

-func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) error {
     tf := b.Terraform
     if tf == nil {
-        return nil, fmt.Errorf("terraform not initialized")
+        return fmt.Errorf("terraform not initialized")
     }

     cmdio.LogString(ctx, "Starting plan computation")

     err := tf.Init(ctx, tfexec.Upgrade(true))
     if err != nil {
-        return nil, fmt.Errorf("terraform init: %w", err)
+        return fmt.Errorf("terraform init: %w", err)
     }

     // Persist computed plan
     tfDir, err := Dir(b)
     if err != nil {
-        return nil, err
+        return err
     }
     planPath := filepath.Join(tfDir, "plan")
     destroy := p.goal == PlanDestroy

     notEmpty, err := tf.Plan(ctx, tfexec.Destroy(destroy), tfexec.Out(planPath))
     if err != nil {
-        return nil, err
+        return err
     }

     // Set plan in main bundle struct for downstream mutators

@@ -60,7 +60,7 @@ func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
     }

     cmdio.LogString(ctx, fmt.Sprintf("Planning complete and persisted at %s\n", planPath))
-    return nil, nil
+    return nil
 }

 // Plan returns a [bundle.Mutator] that runs the equivalent of `terraform plan -out ./plan`
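For context, a minimal standalone sketch of driving `terraform plan -out` via hashicorp/terraform-exec, as this mutator does; paths are placeholders, and the destroy flag mirrors the PlanDestroy goal above:

package main

import (
    "context"
    "log"
    "path/filepath"

    "github.com/hashicorp/terraform-exec/tfexec"
)

func main() {
    ctx := context.Background()
    // Placeholder paths; in the CLI these come from the bundle cache directory.
    workingDir := "/tmp/bundle-tf"
    execPath := "/usr/local/bin/terraform"

    tf, err := tfexec.NewTerraform(workingDir, execPath)
    if err != nil {
        log.Fatal(err)
    }
    if err := tf.Init(ctx, tfexec.Upgrade(true)); err != nil {
        log.Fatal(err)
    }
    // Plan returns whether the plan contains any changes; tfexec.Destroy(true)
    // would compute a destroy plan instead.
    notEmpty, err := tf.Plan(ctx, tfexec.Destroy(false), tfexec.Out(filepath.Join(workingDir, "plan")))
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("plan has changes: %v", notEmpty)
}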
@@ -2,14 +2,15 @@ package terraform

 import (
     "context"
+    "errors"
     "io"
+    "io/fs"
     "os"
     "path/filepath"

     "github.com/databricks/cli/bundle"
     "github.com/databricks/cli/libs/filer"
     "github.com/databricks/cli/libs/log"
-    "github.com/databricks/databricks-sdk-go/apierr"
 )

 type statePull struct{}

@@ -18,15 +19,15 @@ func (l *statePull) Name() string {
     return "terraform:state-pull"
 }

-func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error {
     f, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath)
     if err != nil {
-        return nil, err
+        return err
     }

     dir, err := Dir(b)
     if err != nil {
-        return nil, err
+        return err
     }

     // Download state file from filer to local cache directory.

@@ -34,23 +35,23 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
     remote, err := f.Read(ctx, TerraformStateFileName)
     if err != nil {
         // On first deploy this state file doesn't yet exist.
-        if apierr.IsMissing(err) {
+        if errors.Is(err, fs.ErrNotExist) {
             log.Infof(ctx, "Remote state file does not exist")
-            return nil, nil
+            return nil
         }
-        return nil, err
+        return err
     }

     // Expect the state file to live under dir.
     local, err := os.OpenFile(filepath.Join(dir, TerraformStateFileName), os.O_CREATE|os.O_RDWR, 0600)
     if err != nil {
-        return nil, err
+        return err
     }
     defer local.Close()

     if !IsLocalStateStale(local, remote) {
         log.Infof(ctx, "Local state is the same or newer, ignoring remote state")
-        return nil, nil
+        return nil
     }

     // Truncating the file before writing

@@ -61,10 +62,10 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
     log.Infof(ctx, "Writing remote state file to local cache directory")
     _, err = io.Copy(local, remote)
     if err != nil {
-        return nil, err
+        return err
     }

-    return nil, nil
+    return nil
 }

 func StatePull() bundle.Mutator {
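The swap from an SDK-specific check to `errors.Is(err, fs.ErrNotExist)` relies on the error chain exposing the standard sentinel; a minimal sketch of the pattern:

package main

import (
    "errors"
    "fmt"
    "io/fs"
    "os"
)

func main() {
    _, err := os.Open("/path/that/does/not/exist")
    // errors.Is walks the wrapped error chain, so this matches any error
    // that wraps fs.ErrNotExist, not just a literal *os.PathError.
    if errors.Is(err, fs.ErrNotExist) {
        fmt.Println("treating a missing file as 'no remote state yet'")
    }
}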
@@ -16,31 +16,31 @@ func (l *statePush) Name() string {
     return "terraform:state-push"
 }

-func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) error {
     f, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath)
     if err != nil {
-        return nil, err
+        return err
     }

     dir, err := Dir(b)
     if err != nil {
-        return nil, err
+        return err
     }

     // Expect the state file to live under dir.
     local, err := os.Open(filepath.Join(dir, TerraformStateFileName))
     if err != nil {
-        return nil, err
+        return err
     }

     // Upload state file from local cache directory to filer.
     log.Infof(ctx, "Writing local state file to remote state directory")
     err = f.Write(ctx, TerraformStateFileName, local, filer.CreateParentDirectories, filer.OverwriteIfExists)
     if err != nil {
-        return nil, err
+        return err
     }

-    return nil, nil
+    return nil
 }

 func StatePush() bundle.Mutator {
@@ -15,16 +15,16 @@ func (w *write) Name() string {
     return "terraform.Write"
 }

-func (w *write) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (w *write) Apply(ctx context.Context, b *bundle.Bundle) error {
     dir, err := Dir(b)
     if err != nil {
-        return nil, err
+        return err
     }

     root := BundleToTerraform(&b.Config)
     f, err := os.Create(filepath.Join(dir, "bundle.tf.json"))
     if err != nil {
-        return nil, err
+        return err
     }

     defer f.Close()

@@ -33,10 +33,10 @@ func (w *write) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
     enc.SetIndent("", "  ")
     err = enc.Encode(root)
     if err != nil {
-        return nil, err
+        return err
     }

-    return nil, nil
+    return nil
 }

 // Write returns a [bundle.Mutator] that converts resources in a bundle configuration
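The write mutator streams the Terraform root module to bundle.tf.json as pretty-printed JSON; a minimal standalone sketch of the same encoding pattern, with a hypothetical stand-in for the generated root module:

package main

import (
    "encoding/json"
    "log"
    "os"
)

func main() {
    // Hypothetical stand-in for the root module produced by BundleToTerraform.
    root := map[string]any{
        "resource": map[string]any{"databricks_job": map[string]any{}},
    }

    f, err := os.Create("bundle.tf.json")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    enc := json.NewEncoder(f)
    enc.SetIndent("", "  ") // match the two-space indent used by the mutator
    if err := enc.Encode(root); err != nil {
        log.Fatal(err)
    }
}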
@@ -2,10 +2,12 @@ package deployer

 import (
     "context"
+    "errors"
     "fmt"
+    "io"
+    "io/fs"
     "os"
     "path/filepath"
-    "strings"

     "github.com/databricks/cli/libs/locker"
     "github.com/databricks/cli/libs/log"

@@ -97,22 +99,25 @@ func (b *Deployer) tfStateLocalPath() string {
     return filepath.Join(b.DefaultTerraformRoot(), "terraform.tfstate")
 }

-func (b *Deployer) LoadTerraformState(ctx context.Context) error {
-    bytes, err := b.locker.GetRawJsonFileContent(ctx, b.tfStateRemotePath())
-    if err != nil {
+func (d *Deployer) LoadTerraformState(ctx context.Context) error {
+    r, err := d.locker.Read(ctx, d.tfStateRemotePath())
+    if errors.Is(err, fs.ErrNotExist) {
         // If remote tf state is absent, use local tf state
-        if strings.Contains(err.Error(), "File not found.") {
-            return nil
-        } else {
-            return err
-        }
+        return nil
     }
-    err = os.MkdirAll(b.DefaultTerraformRoot(), os.ModeDir)
     if err != nil {
         return err
     }
-    err = os.WriteFile(b.tfStateLocalPath(), bytes, os.ModePerm)
-    return err
+    defer r.Close()
+    err = os.MkdirAll(d.DefaultTerraformRoot(), os.ModeDir)
+    if err != nil {
+        return err
+    }
+    b, err := io.ReadAll(r)
+    if err != nil {
+        return err
+    }
+    return os.WriteFile(d.tfStateLocalPath(), b, os.ModePerm)
 }

 func (b *Deployer) SaveTerraformState(ctx context.Context) error {

@@ -120,7 +125,7 @@ func (b *Deployer) SaveTerraformState(ctx context.Context) error {
     if err != nil {
         return err
     }
-    return b.locker.PutFile(ctx, b.tfStateRemotePath(), bytes)
+    return b.locker.Write(ctx, b.tfStateRemotePath(), bytes)
 }

 func (d *Deployer) Lock(ctx context.Context, isForced bool) error {
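The reworked locker API returns an io.ReadCloser rather than raw bytes, and the mutator buffers it with io.ReadAll before writing. For very large state files, streaming with io.Copy would avoid the intermediate buffer; a minimal sketch of that alternative, where stream is any io.Reader:

package main

import (
    "io"
    "log"
    "os"
    "strings"
)

// writeStream copies a reader straight to path without buffering it in memory.
func writeStream(path string, stream io.Reader) error {
    f, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
    if err != nil {
        return err
    }
    defer f.Close()
    _, err = io.Copy(f, stream)
    return err
}

func main() {
    if err := writeStream("terraform.tfstate", strings.NewReader(`{"version": 4}`)); err != nil {
        log.Fatal(err)
    }
}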
@@ -12,16 +12,17 @@ type Config struct {
     AzureTenantId string `json:"azure_tenant_id,omitempty"`
     AzureUseMsi bool `json:"azure_use_msi,omitempty"`
     AzureWorkspaceResourceId string `json:"azure_workspace_resource_id,omitempty"`
-    BricksCliPath string `json:"bricks_cli_path,omitempty"`
     ClientId string `json:"client_id,omitempty"`
     ClientSecret string `json:"client_secret,omitempty"`
     ConfigFile string `json:"config_file,omitempty"`
+    DatabricksCliPath string `json:"databricks_cli_path,omitempty"`
     DebugHeaders bool `json:"debug_headers,omitempty"`
     DebugTruncateBytes int `json:"debug_truncate_bytes,omitempty"`
     GoogleCredentials string `json:"google_credentials,omitempty"`
     GoogleServiceAccount string `json:"google_service_account,omitempty"`
     Host string `json:"host,omitempty"`
    HttpTimeoutSeconds int `json:"http_timeout_seconds,omitempty"`
+    MetadataServiceUrl string `json:"metadata_service_url,omitempty"`
     Password string `json:"password,omitempty"`
     Profile string `json:"profile,omitempty"`
     RateLimit int `json:"rate_limit,omitempty"`
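These generated structs lean on `omitempty` so that unset provider options disappear entirely from the emitted JSON; a quick illustration with a hypothetical two-field struct:

package main

import (
    "encoding/json"
    "fmt"
)

type config struct {
    Host    string `json:"host,omitempty"`
    Profile string `json:"profile,omitempty"`
}

func main() {
    out, _ := json.Marshal(config{Host: "https://example.cloud.databricks.com"})
    // Profile is the zero value, so it is omitted from the output entirely.
    fmt.Println(string(out)) // {"host":"https://example.cloud.databricks.com"}
}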
@@ -120,12 +120,17 @@ type DataSourceClusterClusterInfoInitScriptsS3 struct {
     Region string `json:"region,omitempty"`
 }

+type DataSourceClusterClusterInfoInitScriptsWorkspace struct {
+    Destination string `json:"destination,omitempty"`
+}
+
 type DataSourceClusterClusterInfoInitScripts struct {
     Abfss *DataSourceClusterClusterInfoInitScriptsAbfss `json:"abfss,omitempty"`
     Dbfs *DataSourceClusterClusterInfoInitScriptsDbfs `json:"dbfs,omitempty"`
     File *DataSourceClusterClusterInfoInitScriptsFile `json:"file,omitempty"`
     Gcs *DataSourceClusterClusterInfoInitScriptsGcs `json:"gcs,omitempty"`
     S3 *DataSourceClusterClusterInfoInitScriptsS3 `json:"s3,omitempty"`
+    Workspace *DataSourceClusterClusterInfoInitScriptsWorkspace `json:"workspace,omitempty"`
 }

 type DataSourceClusterClusterInfoTerminationReason struct {

@@ -4,7 +4,11 @@ package schema

 type DataSourceClusterPolicy struct {
     Definition string `json:"definition,omitempty"`
+    Description string `json:"description,omitempty"`
     Id string `json:"id,omitempty"`
+    IsDefault bool `json:"is_default,omitempty"`
     MaxClustersPerUser int `json:"max_clusters_per_user,omitempty"`
-    Name string `json:"name"`
+    Name string `json:"name,omitempty"`
+    PolicyFamilyDefinitionOverrides string `json:"policy_family_definition_overrides,omitempty"`
+    PolicyFamilyId string `json:"policy_family_id,omitempty"`
 }
@@ -127,12 +127,17 @@ type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsS3 struct {
     Region string `json:"region,omitempty"`
 }

+type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsWorkspace struct {
+    Destination string `json:"destination,omitempty"`
+}
+
 type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScripts struct {
     Abfss *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
     Dbfs *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
     File *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsFile `json:"file,omitempty"`
     Gcs *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsGcs `json:"gcs,omitempty"`
     S3 *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsS3 `json:"s3,omitempty"`
+    Workspace *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsWorkspace `json:"workspace,omitempty"`
 }

 type DataSourceJobJobSettingsSettingsJobClusterNewClusterWorkloadTypeClients struct {

@@ -303,12 +308,17 @@ type DataSourceJobJobSettingsSettingsNewClusterInitScriptsS3 struct {
     Region string `json:"region,omitempty"`
 }

+type DataSourceJobJobSettingsSettingsNewClusterInitScriptsWorkspace struct {
+    Destination string `json:"destination,omitempty"`
+}
+
 type DataSourceJobJobSettingsSettingsNewClusterInitScripts struct {
     Abfss *DataSourceJobJobSettingsSettingsNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
     Dbfs *DataSourceJobJobSettingsSettingsNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
     File *DataSourceJobJobSettingsSettingsNewClusterInitScriptsFile `json:"file,omitempty"`
     Gcs *DataSourceJobJobSettingsSettingsNewClusterInitScriptsGcs `json:"gcs,omitempty"`
     S3 *DataSourceJobJobSettingsSettingsNewClusterInitScriptsS3 `json:"s3,omitempty"`
+    Workspace *DataSourceJobJobSettingsSettingsNewClusterInitScriptsWorkspace `json:"workspace,omitempty"`
 }

 type DataSourceJobJobSettingsSettingsNewClusterWorkloadTypeClients struct {

@@ -359,6 +369,11 @@ type DataSourceJobJobSettingsSettingsNotebookTask struct {
     Source string `json:"source,omitempty"`
 }

+type DataSourceJobJobSettingsSettingsNotificationSettings struct {
+    NoAlertForCanceledRuns bool `json:"no_alert_for_canceled_runs,omitempty"`
+    NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"`
+}
+
 type DataSourceJobJobSettingsSettingsPipelineTask struct {
     PipelineId string `json:"pipeline_id"`
 }

@@ -370,6 +385,14 @@ type DataSourceJobJobSettingsSettingsPythonWheelTask struct {
     Parameters []string `json:"parameters,omitempty"`
 }

+type DataSourceJobJobSettingsSettingsQueue struct {
+}
+
+type DataSourceJobJobSettingsSettingsRunAs struct {
+    ServicePrincipalName string `json:"service_principal_name,omitempty"`
+    UserName string `json:"user_name,omitempty"`
+}
+
 type DataSourceJobJobSettingsSettingsSchedule struct {
     PauseStatus string `json:"pause_status,omitempty"`
     QuartzCronExpression string `json:"quartz_cron_expression"`

@@ -385,6 +408,7 @@ type DataSourceJobJobSettingsSettingsSparkJarTask struct {
 type DataSourceJobJobSettingsSettingsSparkPythonTask struct {
     Parameters []string `json:"parameters,omitempty"`
     PythonFile string `json:"python_file"`
+    Source string `json:"source,omitempty"`
 }

 type DataSourceJobJobSettingsSettingsSparkSubmitTask struct {

@@ -533,12 +557,17 @@ type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsS3 struct {
     Region string `json:"region,omitempty"`
 }

+type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsWorkspace struct {
+    Destination string `json:"destination,omitempty"`
+}
+
 type DataSourceJobJobSettingsSettingsTaskNewClusterInitScripts struct {
     Abfss *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
     Dbfs *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
     File *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsFile `json:"file,omitempty"`
     Gcs *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsGcs `json:"gcs,omitempty"`
     S3 *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsS3 `json:"s3,omitempty"`
+    Workspace *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"`
 }

 type DataSourceJobJobSettingsSettingsTaskNewClusterWorkloadTypeClients struct {

@@ -609,6 +638,7 @@ type DataSourceJobJobSettingsSettingsTaskSparkJarTask struct {
 type DataSourceJobJobSettingsSettingsTaskSparkPythonTask struct {
     Parameters []string `json:"parameters,omitempty"`
     PythonFile string `json:"python_file"`
+    Source string `json:"source,omitempty"`
 }

 type DataSourceJobJobSettingsSettingsTaskSparkSubmitTask struct {

@@ -623,6 +653,10 @@ type DataSourceJobJobSettingsSettingsTaskSqlTaskDashboard struct {
     DashboardId string `json:"dashboard_id"`
 }

+type DataSourceJobJobSettingsSettingsTaskSqlTaskFile struct {
+    Path string `json:"path"`
+}
+
 type DataSourceJobJobSettingsSettingsTaskSqlTaskQuery struct {
     QueryId string `json:"query_id"`
 }

@@ -632,6 +666,7 @@ type DataSourceJobJobSettingsSettingsTaskSqlTask struct {
     WarehouseId string `json:"warehouse_id,omitempty"`
     Alert *DataSourceJobJobSettingsSettingsTaskSqlTaskAlert `json:"alert,omitempty"`
     Dashboard *DataSourceJobJobSettingsSettingsTaskSqlTaskDashboard `json:"dashboard,omitempty"`
+    File *DataSourceJobJobSettingsSettingsTaskSqlTaskFile `json:"file,omitempty"`
     Query *DataSourceJobJobSettingsSettingsTaskSqlTaskQuery `json:"query,omitempty"`
 }

@@ -642,6 +677,7 @@ type DataSourceJobJobSettingsSettingsTask struct {
     MaxRetries int `json:"max_retries,omitempty"`
     MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"`
     RetryOnTimeout bool `json:"retry_on_timeout,omitempty"`
+    RunIf string `json:"run_if,omitempty"`
     TaskKey string `json:"task_key,omitempty"`
     TimeoutSeconds int `json:"timeout_seconds,omitempty"`
     DbtTask *DataSourceJobJobSettingsSettingsTaskDbtTask `json:"dbt_task,omitempty"`

@@ -658,6 +694,17 @@ type DataSourceJobJobSettingsSettingsTask struct {
     SqlTask *DataSourceJobJobSettingsSettingsTaskSqlTask `json:"sql_task,omitempty"`
 }

+type DataSourceJobJobSettingsSettingsTriggerFileArrival struct {
+    MinTimeBetweenTriggerSeconds int `json:"min_time_between_trigger_seconds,omitempty"`
+    Url string `json:"url"`
+    WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"`
+}
+
+type DataSourceJobJobSettingsSettingsTrigger struct {
+    PauseStatus string `json:"pause_status,omitempty"`
+    FileArrival *DataSourceJobJobSettingsSettingsTriggerFileArrival `json:"file_arrival,omitempty"`
+}
+
 type DataSourceJobJobSettingsSettingsWebhookNotificationsOnFailure struct {
     Id string `json:"id"`
 }

@@ -694,13 +741,17 @@ type DataSourceJobJobSettingsSettings struct {
     Library []DataSourceJobJobSettingsSettingsLibrary `json:"library,omitempty"`
     NewCluster *DataSourceJobJobSettingsSettingsNewCluster `json:"new_cluster,omitempty"`
     NotebookTask *DataSourceJobJobSettingsSettingsNotebookTask `json:"notebook_task,omitempty"`
+    NotificationSettings *DataSourceJobJobSettingsSettingsNotificationSettings `json:"notification_settings,omitempty"`
     PipelineTask *DataSourceJobJobSettingsSettingsPipelineTask `json:"pipeline_task,omitempty"`
     PythonWheelTask *DataSourceJobJobSettingsSettingsPythonWheelTask `json:"python_wheel_task,omitempty"`
+    Queue *DataSourceJobJobSettingsSettingsQueue `json:"queue,omitempty"`
+    RunAs *DataSourceJobJobSettingsSettingsRunAs `json:"run_as,omitempty"`
     Schedule *DataSourceJobJobSettingsSettingsSchedule `json:"schedule,omitempty"`
     SparkJarTask *DataSourceJobJobSettingsSettingsSparkJarTask `json:"spark_jar_task,omitempty"`
     SparkPythonTask *DataSourceJobJobSettingsSettingsSparkPythonTask `json:"spark_python_task,omitempty"`
     SparkSubmitTask *DataSourceJobJobSettingsSettingsSparkSubmitTask `json:"spark_submit_task,omitempty"`
     Task []DataSourceJobJobSettingsSettingsTask `json:"task,omitempty"`
+    Trigger *DataSourceJobJobSettingsSettingsTrigger `json:"trigger,omitempty"`
     WebhookNotifications *DataSourceJobJobSettingsSettingsWebhookNotifications `json:"webhook_notifications,omitempty"`
 }

@@ -708,6 +759,7 @@ type DataSourceJobJobSettings struct {
     CreatedTime int `json:"created_time,omitempty"`
     CreatorUserName string `json:"creator_user_name,omitempty"`
     JobId int `json:"job_id,omitempty"`
+    RunAsUserName string `json:"run_as_user_name,omitempty"`
     Settings *DataSourceJobJobSettingsSettings `json:"settings,omitempty"`
 }

@@ -0,0 +1,9 @@
+// Generated from Databricks Terraform provider schema. DO NOT EDIT.
+
+package schema
+
+type DataSourcePipelines struct {
+    Id string `json:"id,omitempty"`
+    Ids []string `json:"ids,omitempty"`
+    PipelineName string `json:"pipeline_name,omitempty"`
+}
@@ -23,6 +23,7 @@ type DataSources struct {
     NodeType map[string]*DataSourceNodeType `json:"databricks_node_type,omitempty"`
     Notebook map[string]*DataSourceNotebook `json:"databricks_notebook,omitempty"`
     NotebookPaths map[string]*DataSourceNotebookPaths `json:"databricks_notebook_paths,omitempty"`
+    Pipelines map[string]*DataSourcePipelines `json:"databricks_pipelines,omitempty"`
     Schemas map[string]*DataSourceSchemas `json:"databricks_schemas,omitempty"`
     ServicePrincipal map[string]*DataSourceServicePrincipal `json:"databricks_service_principal,omitempty"`
     ServicePrincipals map[string]*DataSourceServicePrincipals `json:"databricks_service_principals,omitempty"`

@@ -59,6 +60,7 @@ func NewDataSources() *DataSources {
     NodeType: make(map[string]*DataSourceNodeType),
     Notebook: make(map[string]*DataSourceNotebook),
     NotebookPaths: make(map[string]*DataSourceNotebookPaths),
+    Pipelines: make(map[string]*DataSourcePipelines),
     Schemas: make(map[string]*DataSourceSchemas),
     ServicePrincipal: make(map[string]*DataSourceServicePrincipal),
     ServicePrincipals: make(map[string]*DataSourceServicePrincipals),
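Registering a new data source takes two touches: the typed map on DataSources and its make() in the constructor. A sketch of how such a root then serializes into Terraform JSON, with the generated types simplified to hypothetical stand-ins:

package main

import (
    "encoding/json"
    "fmt"
)

// Simplified stand-ins for the generated schema types.
type DataSourcePipelines struct {
    PipelineName string `json:"pipeline_name,omitempty"`
}

type DataSources struct {
    Pipelines map[string]*DataSourcePipelines `json:"databricks_pipelines,omitempty"`
}

func main() {
    ds := &DataSources{Pipelines: map[string]*DataSourcePipelines{
        "all": {PipelineName: "my_pipeline"},
    }}
    // Emits the data.databricks_pipelines.all block Terraform expects.
    out, _ := json.MarshalIndent(map[string]any{"data": ds}, "", "  ")
    fmt.Println(string(out))
}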
@@ -6,6 +6,7 @@ type ResourceCatalog struct {
     Comment string `json:"comment,omitempty"`
     ForceDestroy bool `json:"force_destroy,omitempty"`
     Id string `json:"id,omitempty"`
+    IsolationMode string `json:"isolation_mode,omitempty"`
     MetastoreId string `json:"metastore_id,omitempty"`
     Name string `json:"name"`
     Owner string `json:"owner,omitempty"`
@@ -98,12 +98,17 @@ type ResourceClusterInitScriptsS3 struct {
     Region string `json:"region,omitempty"`
 }

+type ResourceClusterInitScriptsWorkspace struct {
+    Destination string `json:"destination,omitempty"`
+}
+
 type ResourceClusterInitScripts struct {
     Abfss *ResourceClusterInitScriptsAbfss `json:"abfss,omitempty"`
     Dbfs *ResourceClusterInitScriptsDbfs `json:"dbfs,omitempty"`
     File *ResourceClusterInitScriptsFile `json:"file,omitempty"`
     Gcs *ResourceClusterInitScriptsGcs `json:"gcs,omitempty"`
     S3 *ResourceClusterInitScriptsS3 `json:"s3,omitempty"`
+    Workspace *ResourceClusterInitScriptsWorkspace `json:"workspace,omitempty"`
 }

 type ResourceClusterLibraryCran struct {
@@ -3,9 +3,12 @@
 package schema

 type ResourceClusterPolicy struct {
-    Definition string `json:"definition"`
+    Definition string `json:"definition,omitempty"`
+    Description string `json:"description,omitempty"`
     Id string `json:"id,omitempty"`
     MaxClustersPerUser int `json:"max_clusters_per_user,omitempty"`
     Name string `json:"name"`
+    PolicyFamilyDefinitionOverrides string `json:"policy_family_definition_overrides,omitempty"`
+    PolicyFamilyId string `json:"policy_family_id,omitempty"`
     PolicyId string `json:"policy_id,omitempty"`
 }
@@ -5,10 +5,12 @@ package schema
 type ResourceExternalLocation struct {
     Comment string `json:"comment,omitempty"`
     CredentialName string `json:"credential_name"`
+    ForceDestroy bool `json:"force_destroy,omitempty"`
     Id string `json:"id,omitempty"`
     MetastoreId string `json:"metastore_id,omitempty"`
     Name string `json:"name"`
     Owner string `json:"owner,omitempty"`
+    ReadOnly bool `json:"read_only,omitempty"`
     SkipValidation bool `json:"skip_validation,omitempty"`
     Url string `json:"url"`
 }
@@ -127,12 +127,17 @@ type ResourceJobJobClusterNewClusterInitScriptsS3 struct {
     Region string `json:"region,omitempty"`
 }

+type ResourceJobJobClusterNewClusterInitScriptsWorkspace struct {
+    Destination string `json:"destination,omitempty"`
+}
+
 type ResourceJobJobClusterNewClusterInitScripts struct {
     Abfss *ResourceJobJobClusterNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
     Dbfs *ResourceJobJobClusterNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
     File *ResourceJobJobClusterNewClusterInitScriptsFile `json:"file,omitempty"`
     Gcs *ResourceJobJobClusterNewClusterInitScriptsGcs `json:"gcs,omitempty"`
     S3 *ResourceJobJobClusterNewClusterInitScriptsS3 `json:"s3,omitempty"`
+    Workspace *ResourceJobJobClusterNewClusterInitScriptsWorkspace `json:"workspace,omitempty"`
 }

 type ResourceJobJobClusterNewClusterWorkloadTypeClients struct {

@@ -303,12 +308,17 @@ type ResourceJobNewClusterInitScriptsS3 struct {
     Region string `json:"region,omitempty"`
 }

+type ResourceJobNewClusterInitScriptsWorkspace struct {
+    Destination string `json:"destination,omitempty"`
+}
+
 type ResourceJobNewClusterInitScripts struct {
     Abfss *ResourceJobNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
     Dbfs *ResourceJobNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
     File *ResourceJobNewClusterInitScriptsFile `json:"file,omitempty"`
     Gcs *ResourceJobNewClusterInitScriptsGcs `json:"gcs,omitempty"`
     S3 *ResourceJobNewClusterInitScriptsS3 `json:"s3,omitempty"`
+    Workspace *ResourceJobNewClusterInitScriptsWorkspace `json:"workspace,omitempty"`
 }

 type ResourceJobNewClusterWorkloadTypeClients struct {

@@ -359,6 +369,11 @@ type ResourceJobNotebookTask struct {
     Source string `json:"source,omitempty"`
 }

+type ResourceJobNotificationSettings struct {
+    NoAlertForCanceledRuns bool `json:"no_alert_for_canceled_runs,omitempty"`
+    NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"`
+}
+
 type ResourceJobPipelineTask struct {
     PipelineId string `json:"pipeline_id"`
 }

@@ -370,6 +385,14 @@ type ResourceJobPythonWheelTask struct {
     Parameters []string `json:"parameters,omitempty"`
 }

+type ResourceJobQueue struct {
+}
+
+type ResourceJobRunAs struct {
+    ServicePrincipalName string `json:"service_principal_name,omitempty"`
+    UserName string `json:"user_name,omitempty"`
+}
+
 type ResourceJobSchedule struct {
     PauseStatus string `json:"pause_status,omitempty"`
     QuartzCronExpression string `json:"quartz_cron_expression"`

@@ -385,6 +408,7 @@ type ResourceJobSparkJarTask struct {
 type ResourceJobSparkPythonTask struct {
     Parameters []string `json:"parameters,omitempty"`
     PythonFile string `json:"python_file"`
+    Source string `json:"source,omitempty"`
 }

 type ResourceJobSparkSubmitTask struct {

@@ -533,12 +557,17 @@ type ResourceJobTaskNewClusterInitScriptsS3 struct {
     Region string `json:"region,omitempty"`
 }

+type ResourceJobTaskNewClusterInitScriptsWorkspace struct {
+    Destination string `json:"destination,omitempty"`
+}
+
 type ResourceJobTaskNewClusterInitScripts struct {
     Abfss *ResourceJobTaskNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
     Dbfs *ResourceJobTaskNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
     File *ResourceJobTaskNewClusterInitScriptsFile `json:"file,omitempty"`
     Gcs *ResourceJobTaskNewClusterInitScriptsGcs `json:"gcs,omitempty"`
     S3 *ResourceJobTaskNewClusterInitScriptsS3 `json:"s3,omitempty"`
+    Workspace *ResourceJobTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"`
 }

 type ResourceJobTaskNewClusterWorkloadTypeClients struct {

@@ -609,6 +638,7 @@ type ResourceJobTaskSparkJarTask struct {
 type ResourceJobTaskSparkPythonTask struct {
     Parameters []string `json:"parameters,omitempty"`
     PythonFile string `json:"python_file"`
+    Source string `json:"source,omitempty"`
 }

 type ResourceJobTaskSparkSubmitTask struct {

@@ -623,6 +653,10 @@ type ResourceJobTaskSqlTaskDashboard struct {
     DashboardId string `json:"dashboard_id"`
 }

+type ResourceJobTaskSqlTaskFile struct {
+    Path string `json:"path"`
+}
+
 type ResourceJobTaskSqlTaskQuery struct {
     QueryId string `json:"query_id"`
 }

@@ -632,6 +666,7 @@ type ResourceJobTaskSqlTask struct {
     WarehouseId string `json:"warehouse_id,omitempty"`
     Alert *ResourceJobTaskSqlTaskAlert `json:"alert,omitempty"`
     Dashboard *ResourceJobTaskSqlTaskDashboard `json:"dashboard,omitempty"`
+    File *ResourceJobTaskSqlTaskFile `json:"file,omitempty"`
     Query *ResourceJobTaskSqlTaskQuery `json:"query,omitempty"`
 }

@@ -642,6 +677,7 @@ type ResourceJobTask struct {
     MaxRetries int `json:"max_retries,omitempty"`
     MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"`
     RetryOnTimeout bool `json:"retry_on_timeout,omitempty"`
+    RunIf string `json:"run_if,omitempty"`
     TaskKey string `json:"task_key,omitempty"`
     TimeoutSeconds int `json:"timeout_seconds,omitempty"`
     DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"`

@@ -658,6 +694,17 @@ type ResourceJobTask struct {
     SqlTask *ResourceJobTaskSqlTask `json:"sql_task,omitempty"`
 }

+type ResourceJobTriggerFileArrival struct {
+    MinTimeBetweenTriggerSeconds int `json:"min_time_between_trigger_seconds,omitempty"`
+    Url string `json:"url"`
+    WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"`
+}
+
+type ResourceJobTrigger struct {
+    PauseStatus string `json:"pause_status,omitempty"`
+    FileArrival *ResourceJobTriggerFileArrival `json:"file_arrival,omitempty"`
+}
+
 type ResourceJobWebhookNotificationsOnFailure struct {
     Id string `json:"id"`
 }

@@ -697,12 +744,16 @@ type ResourceJob struct {
     Library []ResourceJobLibrary `json:"library,omitempty"`
     NewCluster *ResourceJobNewCluster `json:"new_cluster,omitempty"`
     NotebookTask *ResourceJobNotebookTask `json:"notebook_task,omitempty"`
+    NotificationSettings *ResourceJobNotificationSettings `json:"notification_settings,omitempty"`
     PipelineTask *ResourceJobPipelineTask `json:"pipeline_task,omitempty"`
     PythonWheelTask *ResourceJobPythonWheelTask `json:"python_wheel_task,omitempty"`
|
PythonWheelTask *ResourceJobPythonWheelTask `json:"python_wheel_task,omitempty"`
|
||||||
|
Queue *ResourceJobQueue `json:"queue,omitempty"`
|
||||||
|
RunAs *ResourceJobRunAs `json:"run_as,omitempty"`
|
||||||
Schedule *ResourceJobSchedule `json:"schedule,omitempty"`
|
Schedule *ResourceJobSchedule `json:"schedule,omitempty"`
|
||||||
SparkJarTask *ResourceJobSparkJarTask `json:"spark_jar_task,omitempty"`
|
SparkJarTask *ResourceJobSparkJarTask `json:"spark_jar_task,omitempty"`
|
||||||
SparkPythonTask *ResourceJobSparkPythonTask `json:"spark_python_task,omitempty"`
|
SparkPythonTask *ResourceJobSparkPythonTask `json:"spark_python_task,omitempty"`
|
||||||
SparkSubmitTask *ResourceJobSparkSubmitTask `json:"spark_submit_task,omitempty"`
|
SparkSubmitTask *ResourceJobSparkSubmitTask `json:"spark_submit_task,omitempty"`
|
||||||
Task []ResourceJobTask `json:"task,omitempty"`
|
Task []ResourceJobTask `json:"task,omitempty"`
|
||||||
|
Trigger *ResourceJobTrigger `json:"trigger,omitempty"`
|
||||||
WebhookNotifications *ResourceJobWebhookNotifications `json:"webhook_notifications,omitempty"`
|
WebhookNotifications *ResourceJobWebhookNotifications `json:"webhook_notifications,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
|
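
Aside: the hunks above extend the generated job schema with queue, run_as, and trigger blocks. A minimal sketch (not part of this commit) that re-declares two of those structs to show the JSON shape Terraform receives; the service principal ID and file-arrival URL are hypothetical example values.

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// Re-declared from the hunk above for a self-contained example.
	type ResourceJobRunAs struct {
		ServicePrincipalName string `json:"service_principal_name,omitempty"`
		UserName             string `json:"user_name,omitempty"`
	}

	type ResourceJobTriggerFileArrival struct {
		MinTimeBetweenTriggerSeconds int    `json:"min_time_between_trigger_seconds,omitempty"`
		Url                          string `json:"url"`
		WaitAfterLastChangeSeconds   int    `json:"wait_after_last_change_seconds,omitempty"`
	}

	type ResourceJobTrigger struct {
		PauseStatus string                         `json:"pause_status,omitempty"`
		FileArrival *ResourceJobTriggerFileArrival `json:"file_arrival,omitempty"`
	}

	func main() {
		// Hypothetical values; omitempty drops unset optional fields.
		runAs := ResourceJobRunAs{ServicePrincipalName: "00000000-0000-0000-0000-000000000000"}
		trigger := ResourceJobTrigger{
			PauseStatus: "UNPAUSED",
			FileArrival: &ResourceJobTriggerFileArrival{Url: "/mnt/landing/"},
		}
		a, _ := json.Marshal(runAs)
		b, _ := json.Marshal(trigger)
		fmt.Println(string(a)) // {"service_principal_name":"..."}
		fmt.Println(string(b)) // {"pause_status":"UNPAUSED","file_arrival":{"url":"/mnt/landing/"}}
	}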
@ -27,5 +27,6 @@ type ResourceModelServingConfig struct {
 type ResourceModelServing struct {
 	Id string `json:"id,omitempty"`
 	Name string `json:"name"`
+	ServingEndpointId string `json:"serving_endpoint_id,omitempty"`
 	Config *ResourceModelServingConfig `json:"config,omitempty"`
 }
@ -26,9 +26,12 @@ type ResourcePermissions struct {
 	RegisteredModelId string `json:"registered_model_id,omitempty"`
 	RepoId string `json:"repo_id,omitempty"`
 	RepoPath string `json:"repo_path,omitempty"`
+	ServingEndpointId string `json:"serving_endpoint_id,omitempty"`
 	SqlAlertId string `json:"sql_alert_id,omitempty"`
 	SqlDashboardId string `json:"sql_dashboard_id,omitempty"`
 	SqlEndpointId string `json:"sql_endpoint_id,omitempty"`
 	SqlQueryId string `json:"sql_query_id,omitempty"`
+	WorkspaceFileId string `json:"workspace_file_id,omitempty"`
+	WorkspaceFilePath string `json:"workspace_file_path,omitempty"`
 	AccessControl []ResourcePermissionsAccessControl `json:"access_control,omitempty"`
 }
@ -76,12 +76,17 @@ type ResourcePipelineClusterInitScriptsS3 struct {
 	Region string `json:"region,omitempty"`
 }
 
+type ResourcePipelineClusterInitScriptsWorkspace struct {
+	Destination string `json:"destination,omitempty"`
+}
+
 type ResourcePipelineClusterInitScripts struct {
 	Abfss *ResourcePipelineClusterInitScriptsAbfss `json:"abfss,omitempty"`
 	Dbfs *ResourcePipelineClusterInitScriptsDbfs `json:"dbfs,omitempty"`
 	File *ResourcePipelineClusterInitScriptsFile `json:"file,omitempty"`
 	Gcs *ResourcePipelineClusterInitScriptsGcs `json:"gcs,omitempty"`
 	S3 *ResourcePipelineClusterInitScriptsS3 `json:"s3,omitempty"`
+	Workspace *ResourcePipelineClusterInitScriptsWorkspace `json:"workspace,omitempty"`
 }
 
 type ResourcePipelineCluster struct {
@ -133,6 +138,11 @@ type ResourcePipelineLibrary struct {
 	Notebook *ResourcePipelineLibraryNotebook `json:"notebook,omitempty"`
 }
 
+type ResourcePipelineNotification struct {
+	Alerts []string `json:"alerts"`
+	EmailRecipients []string `json:"email_recipients"`
+}
+
 type ResourcePipeline struct {
 	AllowDuplicateNames bool `json:"allow_duplicate_names,omitempty"`
 	Catalog string `json:"catalog,omitempty"`
@ -144,10 +154,12 @@ type ResourcePipeline struct {
 	Id string `json:"id,omitempty"`
 	Name string `json:"name,omitempty"`
 	Photon bool `json:"photon,omitempty"`
+	Serverless bool `json:"serverless,omitempty"`
 	Storage string `json:"storage,omitempty"`
 	Target string `json:"target,omitempty"`
 	Url string `json:"url,omitempty"`
 	Cluster []ResourcePipelineCluster `json:"cluster,omitempty"`
 	Filters *ResourcePipelineFilters `json:"filters,omitempty"`
 	Library []ResourcePipelineLibrary `json:"library,omitempty"`
+	Notification []ResourcePipelineNotification `json:"notification,omitempty"`
 }
@ -0,0 +1,26 @@
+// Generated from Databricks Terraform provider schema. DO NOT EDIT.
+
+package schema
+
+type ResourceSqlTableColumn struct {
+	Comment string `json:"comment,omitempty"`
+	Name string `json:"name"`
+	Nullable bool `json:"nullable,omitempty"`
+	Type string `json:"type"`
+}
+
+type ResourceSqlTable struct {
+	CatalogName string `json:"catalog_name"`
+	ClusterId string `json:"cluster_id,omitempty"`
+	Comment string `json:"comment,omitempty"`
+	DataSourceFormat string `json:"data_source_format,omitempty"`
+	Id string `json:"id,omitempty"`
+	Name string `json:"name"`
+	Properties map[string]string `json:"properties,omitempty"`
+	SchemaName string `json:"schema_name"`
+	StorageCredentialName string `json:"storage_credential_name,omitempty"`
+	StorageLocation string `json:"storage_location,omitempty"`
+	TableType string `json:"table_type"`
+	ViewDefinition string `json:"view_definition,omitempty"`
+	Column []ResourceSqlTableColumn `json:"column,omitempty"`
+}
@ -32,6 +32,7 @@ type ResourceStorageCredential struct {
 	MetastoreId string `json:"metastore_id,omitempty"`
 	Name string `json:"name"`
 	Owner string `json:"owner,omitempty"`
+	ReadOnly bool `json:"read_only,omitempty"`
 	AwsIamRole *ResourceStorageCredentialAwsIamRole `json:"aws_iam_role,omitempty"`
 	AzureManagedIdentity *ResourceStorageCredentialAzureManagedIdentity `json:"azure_managed_identity,omitempty"`
 	AzureServicePrincipal *ResourceStorageCredentialAzureServicePrincipal `json:"azure_service_principal,omitempty"`
@ -0,0 +1,14 @@
+// Generated from Databricks Terraform provider schema. DO NOT EDIT.
+
+package schema
+
+type ResourceVolume struct {
+	CatalogName string `json:"catalog_name"`
+	Comment string `json:"comment,omitempty"`
+	Id string `json:"id,omitempty"`
+	Name string `json:"name"`
+	Owner string `json:"owner,omitempty"`
+	SchemaName string `json:"schema_name"`
+	StorageLocation string `json:"storage_location,omitempty"`
+	VolumeType string `json:"volume_type"`
+}
@ -0,0 +1,13 @@
+// Generated from Databricks Terraform provider schema. DO NOT EDIT.
+
+package schema
+
+type ResourceWorkspaceFile struct {
+	ContentBase64 string `json:"content_base64,omitempty"`
+	Id string `json:"id,omitempty"`
+	Md5 string `json:"md5,omitempty"`
+	ObjectId int `json:"object_id,omitempty"`
+	Path string `json:"path"`
+	Source string `json:"source,omitempty"`
+	Url string `json:"url,omitempty"`
+}
@ -65,6 +65,7 @@ type Resources struct {
 	SqlGlobalConfig map[string]*ResourceSqlGlobalConfig `json:"databricks_sql_global_config,omitempty"`
 	SqlPermissions map[string]*ResourceSqlPermissions `json:"databricks_sql_permissions,omitempty"`
 	SqlQuery map[string]*ResourceSqlQuery `json:"databricks_sql_query,omitempty"`
+	SqlTable map[string]*ResourceSqlTable `json:"databricks_sql_table,omitempty"`
 	SqlVisualization map[string]*ResourceSqlVisualization `json:"databricks_sql_visualization,omitempty"`
 	SqlWidget map[string]*ResourceSqlWidget `json:"databricks_sql_widget,omitempty"`
 	StorageCredential map[string]*ResourceStorageCredential `json:"databricks_storage_credential,omitempty"`
@ -73,7 +74,9 @@ type Resources struct {
 	User map[string]*ResourceUser `json:"databricks_user,omitempty"`
 	UserInstanceProfile map[string]*ResourceUserInstanceProfile `json:"databricks_user_instance_profile,omitempty"`
 	UserRole map[string]*ResourceUserRole `json:"databricks_user_role,omitempty"`
+	Volume map[string]*ResourceVolume `json:"databricks_volume,omitempty"`
 	WorkspaceConf map[string]*ResourceWorkspaceConf `json:"databricks_workspace_conf,omitempty"`
+	WorkspaceFile map[string]*ResourceWorkspaceFile `json:"databricks_workspace_file,omitempty"`
 }
 
 func NewResources() *Resources {
@ -140,6 +143,7 @@ func NewResources() *Resources {
 		SqlGlobalConfig: make(map[string]*ResourceSqlGlobalConfig),
 		SqlPermissions: make(map[string]*ResourceSqlPermissions),
 		SqlQuery: make(map[string]*ResourceSqlQuery),
+		SqlTable: make(map[string]*ResourceSqlTable),
 		SqlVisualization: make(map[string]*ResourceSqlVisualization),
 		SqlWidget: make(map[string]*ResourceSqlWidget),
 		StorageCredential: make(map[string]*ResourceStorageCredential),
@ -148,6 +152,8 @@ func NewResources() *Resources {
 		User: make(map[string]*ResourceUser),
 		UserInstanceProfile: make(map[string]*ResourceUserInstanceProfile),
 		UserRole: make(map[string]*ResourceUserRole),
+		Volume: make(map[string]*ResourceVolume),
 		WorkspaceConf: make(map[string]*ResourceWorkspaceConf),
+		WorkspaceFile: make(map[string]*ResourceWorkspaceFile),
 	}
 }
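
Aside: a minimal sketch (not part of this commit) showing how these new map fields serialize. It re-declares a trimmed ResourceVolume and Resources so that the example is self-contained; each map key becomes a named databricks_volume resource in Terraform's JSON syntax. The catalog, schema, and volume names are hypothetical.

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// Trimmed re-declaration of the schema types added above.
	type ResourceVolume struct {
		CatalogName string `json:"catalog_name"`
		Name        string `json:"name"`
		SchemaName  string `json:"schema_name"`
		VolumeType  string `json:"volume_type"`
	}

	type Resources struct {
		Volume map[string]*ResourceVolume `json:"databricks_volume,omitempty"`
	}

	func main() {
		r := Resources{
			Volume: map[string]*ResourceVolume{
				"my_volume": {
					CatalogName: "main",
					Name:        "my_volume",
					SchemaName:  "default",
					VolumeType:  "MANAGED",
				},
			},
		}
		// Wrap in a "resource" key to mirror Terraform JSON configuration.
		out, _ := json.MarshalIndent(map[string]any{"resource": r}, "", "  ")
		fmt.Println(string(out))
	}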
@ -13,42 +13,18 @@ type Mutator interface {
 	Name() string
 
 	// Apply mutates the specified bundle object.
-	// It may return a list of mutators to apply immediately after this mutator.
-	// For example: when processing all configuration files in the tree; each file gets
-	// its own mutator instance.
-	Apply(context.Context, *Bundle) ([]Mutator, error)
+	Apply(context.Context, *Bundle) error
 }
 
-// applyMutator calls apply on the specified mutator given a bundle.
-// Any mutators this call returns are applied recursively.
-func applyMutator(ctx context.Context, b *Bundle, m Mutator) error {
+func Apply(ctx context.Context, b *Bundle, m Mutator) error {
 	ctx = log.NewContext(ctx, log.GetLogger(ctx).With("mutator", m.Name()))
 
 	log.Debugf(ctx, "Apply")
-	ms, err := m.Apply(ctx, b)
+	err := m.Apply(ctx, b)
 	if err != nil {
 		log.Errorf(ctx, "Error: %s", err)
 		return err
 	}
 
-	// Apply recursively.
-	err = Apply(ctx, b, ms)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func Apply(ctx context.Context, b *Bundle, ms []Mutator) error {
-	if len(ms) == 0 {
-		return nil
-	}
-	for _, m := range ms {
-		err := applyMutator(ctx, b, m)
-		if err != nil {
-			return err
-		}
-	}
 	return nil
 }
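
Aside: with the simplified interface above, a mutator returns only an error, and nesting happens by calling Apply (or Seq) from inside Apply rather than by returning follow-up mutators. A minimal sketch (not part of this commit) of a conforming mutator; the b.Config.Bundle.Name field is taken from tests later in this diff, while the mutator itself and its default value are hypothetical.

	package bundle

	import "context"

	// setDefaultName is a hypothetical mutator conforming to the new interface.
	type setDefaultName struct{}

	func (m *setDefaultName) Name() string { return "SetDefaultName" }

	func (m *setDefaultName) Apply(ctx context.Context, b *Bundle) error {
		// Mutate the bundle in place; report failure through the error only.
		if b.Config.Bundle.Name == "" {
			b.Config.Bundle.Name = "unnamed"
		}
		return nil
	}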
@ -16,9 +16,9 @@ func (t *testMutator) Name() string {
 	return "test"
 }
 
-func (t *testMutator) Apply(_ context.Context, b *Bundle) ([]Mutator, error) {
+func (t *testMutator) Apply(ctx context.Context, b *Bundle) error {
 	t.applyCalled++
-	return t.nestedMutators, nil
+	return Apply(ctx, b, Seq(t.nestedMutators...))
 }
 
 func TestMutator(t *testing.T) {
@ -35,7 +35,7 @@ func TestMutator(t *testing.T) {
 	}
 
 	bundle := &Bundle{}
-	err := Apply(context.Background(), bundle, []Mutator{m})
+	err := Apply(context.Background(), bundle, m)
 	assert.NoError(t, err)
 
 	assert.Equal(t, 1, m.applyCalled)
@ -10,8 +10,10 @@ import (
 
 // The deploy phase deploys artifacts and resources.
 func Deploy() bundle.Mutator {
-	deployPhase := bundle.Defer([]bundle.Mutator{
+	deployMutator := bundle.Seq(
 		lock.Acquire(),
+		bundle.Defer(
+			bundle.Seq(
 				files.Upload(),
 				artifacts.UploadAll(),
 				terraform.Interpolate(),
@ -19,12 +21,13 @@ func Deploy() bundle.Mutator {
 				terraform.StatePull(),
 				terraform.Apply(),
 				terraform.StatePush(),
-	}, []bundle.Mutator{
-		lock.Release(),
-	})
+			),
+			lock.Release(lock.GoalDeploy),
+		),
+	)
 
 	return newPhase(
 		"deploy",
-		deployPhase,
+		[]bundle.Mutator{deployMutator},
 	)
 }
@ -9,19 +9,23 @@ import (
 
 // The destroy phase deletes artifacts and resources.
 func Destroy() bundle.Mutator {
-	destroyPhase := bundle.Defer([]bundle.Mutator{
+
+	destroyMutator := bundle.Seq(
 		lock.Acquire(),
+		bundle.Defer(
+			bundle.Seq(
 				terraform.StatePull(),
 				terraform.Plan(terraform.PlanGoal("destroy")),
 				terraform.Destroy(),
 				terraform.StatePush(),
 				files.Delete(),
-	}, []bundle.Mutator{
-		lock.Release(),
-	})
+			),
+			lock.Release(lock.GoalDestroy),
+		),
+	)
 
 	return newPhase(
 		"destroy",
-		destroyPhase,
+		[]bundle.Mutator{destroyMutator},
 	)
 }
@ -26,7 +26,7 @@ func (p *phase) Name() string {
 	return p.name
 }
 
-func (p *phase) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (p *phase) Apply(ctx context.Context, b *bundle.Bundle) error {
 	log.Infof(ctx, "Phase: %s", p.Name())
-	return p.mutators, nil
+	return bundle.Apply(ctx, b, bundle.Seq(p.mutators...))
 }
@ -12,7 +12,6 @@ import (
 	"github.com/databricks/cli/bundle/run/progress"
 	"github.com/databricks/cli/libs/cmdio"
 	"github.com/databricks/cli/libs/log"
-	"github.com/databricks/databricks-sdk-go/retries"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/fatih/color"
 	flag "github.com/spf13/pflag"
@ -145,27 +144,17 @@ func (r *jobRunner) logFailedTasks(ctx context.Context, runId int64) {
 		}
 	}
 }
 
-func pullRunIdCallback(runId *int64) func(info *retries.Info[jobs.Run]) {
-	return func(info *retries.Info[jobs.Run]) {
-		i := info.Info
-		if i == nil {
-			return
-		}
-
+func pullRunIdCallback(runId *int64) func(info *jobs.Run) {
+	return func(i *jobs.Run) {
 		if *runId == 0 {
 			*runId = i.RunId
 		}
 	}
 }
 
-func logDebugCallback(ctx context.Context, runId *int64) func(info *retries.Info[jobs.Run]) {
+func logDebugCallback(ctx context.Context, runId *int64) func(info *jobs.Run) {
 	var prevState *jobs.RunState
-	return func(info *retries.Info[jobs.Run]) {
-		i := info.Info
-		if i == nil {
-			return
-		}
-
+	return func(i *jobs.Run) {
 		state := i.State
 		if state == nil {
 			return
@ -173,23 +162,18 @@ func logDebugCallback(ctx context.Context, runId *int64) func(info *retries.Info
 
 		// Log the job run URL as soon as it is available.
 		if prevState == nil {
-			log.Infof(ctx, "Run available at %s", info.Info.RunPageUrl)
+			log.Infof(ctx, "Run available at %s", i.RunPageUrl)
 		}
 		if prevState == nil || prevState.LifeCycleState != state.LifeCycleState {
-			log.Infof(ctx, "Run status: %s", info.Info.State.LifeCycleState)
+			log.Infof(ctx, "Run status: %s", i.State.LifeCycleState)
 			prevState = state
 		}
 	}
 }
 
-func logProgressCallback(ctx context.Context, progressLogger *cmdio.Logger) func(info *retries.Info[jobs.Run]) {
+func logProgressCallback(ctx context.Context, progressLogger *cmdio.Logger) func(info *jobs.Run) {
 	var prevState *jobs.RunState
-	return func(info *retries.Info[jobs.Run]) {
-		i := info.Info
-		if i == nil {
-			return
-		}
-
+	return func(i *jobs.Run) {
 		state := i.State
 		if state == nil {
 			return
@ -255,8 +239,15 @@ func (r *jobRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, e
 	}
 	logProgress := logProgressCallback(ctx, progressLogger)
 
-	run, err := w.Jobs.RunNowAndWait(ctx, *req,
-		retries.Timeout[jobs.Run](jobRunTimeout), pullRunId, logDebug, logProgress)
+	waiter, err := w.Jobs.RunNow(ctx, *req)
+	if err != nil {
+		return nil, fmt.Errorf("cannot start job")
+	}
+	run, err := waiter.OnProgress(func(r *jobs.Run) {
+		pullRunId(r)
+		logDebug(r)
+		logProgress(r)
+	}).GetWithTimeout(jobRunTimeout)
 	if err != nil && runId != nil {
 		r.logFailedTasks(ctx, *runId)
 	}
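
Aside: a minimal sketch (not part of this commit) of the waiter pattern the last hunk migrates to, assuming the databricks-sdk-go version this change targets; the job ID and timeout value are hypothetical. OnProgress replaces the retries.Info callbacks: it is invoked with each polled run until the run reaches a terminal state.

	package main

	import (
		"context"
		"fmt"
		"time"

		"github.com/databricks/databricks-sdk-go"
		"github.com/databricks/databricks-sdk-go/service/jobs"
	)

	func runAndWait(ctx context.Context, w *databricks.WorkspaceClient, jobID int64) error {
		// RunNow returns immediately with a waiter instead of blocking.
		waiter, err := w.Jobs.RunNow(ctx, jobs.RunNow{JobId: jobID})
		if err != nil {
			return fmt.Errorf("cannot start job: %w", err)
		}
		// Poll until terminal state, reporting progress along the way.
		run, err := waiter.OnProgress(func(r *jobs.Run) {
			if r.State != nil {
				fmt.Println("run status:", r.State.LifeCycleState)
			}
		}).GetWithTimeout(24 * time.Hour) // hypothetical timeout
		if err != nil {
			return err
		}
		fmt.Println("finished:", run.RunPageUrl)
		return nil
	}

	func main() {
		w := databricks.Must(databricks.NewWorkspaceClient())
		if err := runAndWait(context.Background(), w, 123); err != nil {
			fmt.Println(err)
		}
	}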
@ -0,0 +1,25 @@
+package bundle
+
+import "context"
+
+type seqMutator struct {
+	mutators []Mutator
+}
+
+func (s *seqMutator) Name() string {
+	return "seq"
+}
+
+func (s *seqMutator) Apply(ctx context.Context, b *Bundle) error {
+	for _, m := range s.mutators {
+		err := Apply(ctx, b, m)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func Seq(ms ...Mutator) Mutator {
+	return &seqMutator{mutators: ms}
+}
@ -0,0 +1,91 @@
+package bundle
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestSeqMutator(t *testing.T) {
+	m1 := &testMutator{}
+	m2 := &testMutator{}
+	m3 := &testMutator{}
+	seqMutator := Seq(m1, m2, m3)
+
+	bundle := &Bundle{}
+	err := Apply(context.Background(), bundle, seqMutator)
+	assert.NoError(t, err)
+
+	assert.Equal(t, 1, m1.applyCalled)
+	assert.Equal(t, 1, m2.applyCalled)
+	assert.Equal(t, 1, m3.applyCalled)
+}
+
+func TestSeqWithDeferredMutator(t *testing.T) {
+	m1 := &testMutator{}
+	m2 := &testMutator{}
+	m3 := &testMutator{}
+	m4 := &testMutator{}
+	seqMutator := Seq(m1, Defer(m2, m3), m4)
+
+	bundle := &Bundle{}
+	err := Apply(context.Background(), bundle, seqMutator)
+	assert.NoError(t, err)
+
+	assert.Equal(t, 1, m1.applyCalled)
+	assert.Equal(t, 1, m2.applyCalled)
+	assert.Equal(t, 1, m3.applyCalled)
+	assert.Equal(t, 1, m4.applyCalled)
+}
+
+func TestSeqWithErrorAndDeferredMutator(t *testing.T) {
+	errorMut := &mutatorWithError{errorMsg: "error msg"}
+	m1 := &testMutator{}
+	m2 := &testMutator{}
+	m3 := &testMutator{}
+	seqMutator := Seq(errorMut, Defer(m1, m2), m3)
+
+	bundle := &Bundle{}
+	err := Apply(context.Background(), bundle, seqMutator)
+	assert.Error(t, err)
+
+	assert.Equal(t, 1, errorMut.applyCalled)
+	assert.Equal(t, 0, m1.applyCalled)
+	assert.Equal(t, 0, m2.applyCalled)
+	assert.Equal(t, 0, m3.applyCalled)
+}
+
+func TestSeqWithErrorInsideDeferredMutator(t *testing.T) {
+	errorMut := &mutatorWithError{errorMsg: "error msg"}
+	m1 := &testMutator{}
+	m2 := &testMutator{}
+	m3 := &testMutator{}
+	seqMutator := Seq(m1, Defer(errorMut, m2), m3)
+
+	bundle := &Bundle{}
+	err := Apply(context.Background(), bundle, seqMutator)
+	assert.Error(t, err)
+
+	assert.Equal(t, 1, m1.applyCalled)
+	assert.Equal(t, 1, errorMut.applyCalled)
+	assert.Equal(t, 1, m2.applyCalled)
+	assert.Equal(t, 0, m3.applyCalled)
+}
+
+func TestSeqWithErrorInsideFinallyStage(t *testing.T) {
+	errorMut := &mutatorWithError{errorMsg: "error msg"}
+	m1 := &testMutator{}
+	m2 := &testMutator{}
+	m3 := &testMutator{}
+	seqMutator := Seq(m1, Defer(m2, errorMut), m3)
+
+	bundle := &Bundle{}
+	err := Apply(context.Background(), bundle, seqMutator)
+	assert.Error(t, err)
+
+	assert.Equal(t, 1, m1.applyCalled)
+	assert.Equal(t, 1, m2.applyCalled)
+	assert.Equal(t, 1, errorMut.applyCalled)
+	assert.Equal(t, 0, m3.applyCalled)
+}
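
Aside: Defer is referenced by these tests but its implementation is not part of this section. A plausible sketch (not the commit's own code) consistent with the assertions above: the finally mutator runs whether or not the body errors, and an error from either stage propagates, with the body's error taking precedence.

	package bundle

	import "context"

	type deferredMutator struct {
		mutator Mutator
		finally Mutator
	}

	func (d *deferredMutator) Name() string { return "deferred" }

	func (d *deferredMutator) Apply(ctx context.Context, b *Bundle) error {
		mainErr := Apply(ctx, b, d.mutator)
		// The finally mutator always runs, mirroring try/finally semantics.
		finallyErr := Apply(ctx, b, d.finally)
		if mainErr != nil {
			return mainErr
		}
		return finallyErr
	}

	func Defer(mutator Mutator, finally Mutator) Mutator {
		return &deferredMutator{mutator: mutator, finally: finally}
	}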
@ -21,7 +21,7 @@ func TestConflictingResourceIdsNoSubconfig(t *testing.T) {
 func TestConflictingResourceIdsOneSubconfig(t *testing.T) {
 	b, err := bundle.Load("./conflicting_resource_ids/one_subconfiguration")
 	require.NoError(t, err)
-	err = bundle.Apply(context.Background(), b, mutator.DefaultMutators())
+	err = bundle.Apply(context.Background(), b, bundle.Seq(mutator.DefaultMutators()...))
 	bundleConfigPath := filepath.FromSlash("conflicting_resource_ids/one_subconfiguration/bundle.yml")
 	resourcesConfigPath := filepath.FromSlash("conflicting_resource_ids/one_subconfiguration/resources.yml")
 	assert.ErrorContains(t, err, fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", bundleConfigPath, resourcesConfigPath))
@ -30,7 +30,7 @@ func TestConflictingResourceIdsOneSubconfig(t *testing.T) {
 func TestConflictingResourceIdsTwoSubconfigs(t *testing.T) {
 	b, err := bundle.Load("./conflicting_resource_ids/two_subconfigurations")
 	require.NoError(t, err)
-	err = bundle.Apply(context.Background(), b, mutator.DefaultMutators())
+	err = bundle.Apply(context.Background(), b, bundle.Seq(mutator.DefaultMutators()...))
 	resources1ConfigPath := filepath.FromSlash("conflicting_resource_ids/two_subconfigurations/resources1.yml")
 	resources2ConfigPath := filepath.FromSlash("conflicting_resource_ids/two_subconfigurations/resources2.yml")
 	assert.ErrorContains(t, err, fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", resources1ConfigPath, resources2ConfigPath))
@ -0,0 +1,5 @@
+bundle:
+  name: environment_empty
+
+environments:
+  development:
@ -0,0 +1,12 @@
+package config_tests
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestEnvironmentEmpty(t *testing.T) {
+	b := loadEnvironment(t, "./environment_empty", "development")
+	assert.Equal(t, "development", b.Config.Bundle.Environment)
+}
@ -12,11 +12,10 @@ import (
 
 func TestInterpolation(t *testing.T) {
 	b := load(t, "./interpolation")
-	err := bundle.Apply(context.Background(), b, []bundle.Mutator{
-		interpolation.Interpolate(
+	err := bundle.Apply(context.Background(), b, interpolation.Interpolate(
 		interpolation.IncludeLookupsInPath("bundle"),
 		interpolation.IncludeLookupsInPath("workspace"),
-	)})
+	))
 	require.NoError(t, err)
 	assert.Equal(t, "foo bar", b.Config.Bundle.Name)
 	assert.Equal(t, "foo bar | bar", b.Config.Resources.Jobs["my_job"].Name)
@ -12,14 +12,14 @@ import (
 func load(t *testing.T, path string) *bundle.Bundle {
 	b, err := bundle.Load(path)
 	require.NoError(t, err)
-	err = bundle.Apply(context.Background(), b, mutator.DefaultMutators())
+	err = bundle.Apply(context.Background(), b, bundle.Seq(mutator.DefaultMutators()...))
 	require.NoError(t, err)
 	return b
 }
 
 func loadEnvironment(t *testing.T, path, env string) *bundle.Bundle {
 	b := load(t, path)
-	err := bundle.Apply(context.Background(), b, []bundle.Mutator{mutator.SelectEnvironment(env)})
+	err := bundle.Apply(context.Background(), b, mutator.SelectEnvironment(env))
 	require.NoError(t, err)
 	return b
 }
@ -15,45 +15,45 @@ import (
 func TestVariables(t *testing.T) {
 	t.Setenv("BUNDLE_VAR_b", "def")
 	b := load(t, "./variables/vanilla")
-	err := bundle.Apply(context.Background(), b, []bundle.Mutator{
+	err := bundle.Apply(context.Background(), b, bundle.Seq(
 		mutator.SetVariables(),
 		interpolation.Interpolate(
 			interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix),
-		)})
+		)))
 	require.NoError(t, err)
 	assert.Equal(t, "abc def", b.Config.Bundle.Name)
 }
 
 func TestVariablesLoadingFailsWhenRequiredVariableIsNotSpecified(t *testing.T) {
 	b := load(t, "./variables/vanilla")
-	err := bundle.Apply(context.Background(), b, []bundle.Mutator{
+	err := bundle.Apply(context.Background(), b, bundle.Seq(
 		mutator.SetVariables(),
 		interpolation.Interpolate(
 			interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix),
-		)})
+		)))
 	assert.ErrorContains(t, err, "no value assigned to required variable b. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable")
 }
 
 func TestVariablesEnvironmentsBlockOverride(t *testing.T) {
 	b := load(t, "./variables/env_overrides")
-	err := bundle.Apply(context.Background(), b, []bundle.Mutator{
+	err := bundle.Apply(context.Background(), b, bundle.Seq(
 		mutator.SelectEnvironment("env-with-single-variable-override"),
 		mutator.SetVariables(),
 		interpolation.Interpolate(
 			interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix),
-		)})
+		)))
 	require.NoError(t, err)
 	assert.Equal(t, "default-a dev-b", b.Config.Workspace.Profile)
 }
 
 func TestVariablesEnvironmentsBlockOverrideForMultipleVariables(t *testing.T) {
 	b := load(t, "./variables/env_overrides")
-	err := bundle.Apply(context.Background(), b, []bundle.Mutator{
+	err := bundle.Apply(context.Background(), b, bundle.Seq(
 		mutator.SelectEnvironment("env-with-two-variable-overrides"),
 		mutator.SetVariables(),
 		interpolation.Interpolate(
 			interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix),
-		)})
+		)))
 	require.NoError(t, err)
 	assert.Equal(t, "prod-a prod-b", b.Config.Workspace.Profile)
 }
@ -61,34 +61,34 @@ func TestVariablesEnvironmentsBlockOverrideForMultipleVariables(t *testing.T) {
 func TestVariablesEnvironmentsBlockOverrideWithProcessEnvVars(t *testing.T) {
 	t.Setenv("BUNDLE_VAR_b", "env-var-b")
 	b := load(t, "./variables/env_overrides")
-	err := bundle.Apply(context.Background(), b, []bundle.Mutator{
+	err := bundle.Apply(context.Background(), b, bundle.Seq(
 		mutator.SelectEnvironment("env-with-two-variable-overrides"),
 		mutator.SetVariables(),
 		interpolation.Interpolate(
 			interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix),
-		)})
+		)))
 	require.NoError(t, err)
 	assert.Equal(t, "prod-a env-var-b", b.Config.Workspace.Profile)
 }
 
 func TestVariablesEnvironmentsBlockOverrideWithMissingVariables(t *testing.T) {
 	b := load(t, "./variables/env_overrides")
-	err := bundle.Apply(context.Background(), b, []bundle.Mutator{
+	err := bundle.Apply(context.Background(), b, bundle.Seq(
 		mutator.SelectEnvironment("env-missing-a-required-variable-assignment"),
 		mutator.SetVariables(),
 		interpolation.Interpolate(
 			interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix),
-		)})
+		)))
 	assert.ErrorContains(t, err, "no value assigned to required variable b. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable")
 }
 
 func TestVariablesEnvironmentsBlockOverrideWithUndefinedVariables(t *testing.T) {
 	b := load(t, "./variables/env_overrides")
-	err := bundle.Apply(context.Background(), b, []bundle.Mutator{
+	err := bundle.Apply(context.Background(), b, bundle.Seq(
 		mutator.SelectEnvironment("env-using-an-undefined-variable"),
 		mutator.SetVariables(),
 		interpolation.Interpolate(
 			interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix),
-		)})
+		)))
 	assert.ErrorContains(t, err, "variable c is not defined but is assigned a value")
 }
@ -0,0 +1,179 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package access_control
+
+import (
+	"fmt"
+
+	"github.com/databricks/cli/cmd/root"
+	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/flags"
+	"github.com/databricks/databricks-sdk-go/service/iam"
+	"github.com/spf13/cobra"
+)
+
+var Cmd = &cobra.Command{
+	Use:   "access-control",
+	Short: `These APIs manage access rules on resources in an account.`,
+	Long: `These APIs manage access rules on resources in an account. Currently, only
+  grant rules are supported. A grant rule specifies a role assigned to a set of
+  principals. A list of rules attached to a resource is called a rule set.`,
+	Annotations: map[string]string{
+		"package": "iam",
+	},
+}
+
+// start get-assignable-roles-for-resource command
+
+var getAssignableRolesForResourceReq iam.GetAssignableRolesForResourceRequest
+var getAssignableRolesForResourceJson flags.JsonFlag
+
+func init() {
+	Cmd.AddCommand(getAssignableRolesForResourceCmd)
+	// TODO: short flags
+	getAssignableRolesForResourceCmd.Flags().Var(&getAssignableRolesForResourceJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+}
+
+var getAssignableRolesForResourceCmd = &cobra.Command{
+	Use:   "get-assignable-roles-for-resource RESOURCE",
+	Short: `Get assignable roles for a resource.`,
+	Long: `Get assignable roles for a resource.
+
+  Gets all the roles that can be granted on an account level resource. A role is
+  grantable if the rule set on the resource can contain an access rule of the
+  role.`,
+
+	Annotations: map[string]string{},
+	Args: func(cmd *cobra.Command, args []string) error {
+		check := cobra.ExactArgs(1)
+		if cmd.Flags().Changed("json") {
+			check = cobra.ExactArgs(0)
+		}
+		return check(cmd, args)
+	},
+	PreRunE: root.MustAccountClient,
+	RunE: func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		a := root.AccountClient(ctx)
+		if cmd.Flags().Changed("json") {
+			err = getAssignableRolesForResourceJson.Unmarshal(&getAssignableRolesForResourceReq)
+			if err != nil {
+				return err
+			}
+		} else {
+			getAssignableRolesForResourceReq.Resource = args[0]
+		}
+
+		response, err := a.AccessControl.GetAssignableRolesForResource(ctx, getAssignableRolesForResourceReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
+}
+
+// start get-rule-set command
+
+var getRuleSetReq iam.GetRuleSetRequest
+var getRuleSetJson flags.JsonFlag
+
+func init() {
+	Cmd.AddCommand(getRuleSetCmd)
+	// TODO: short flags
+	getRuleSetCmd.Flags().Var(&getRuleSetJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+}
+
+var getRuleSetCmd = &cobra.Command{
+	Use:   "get-rule-set NAME ETAG",
+	Short: `Get a rule set.`,
+	Long: `Get a rule set.
+
+  Get a rule set by its name. A rule set is always attached to a resource and
+  contains a list of access rules on the said resource. Currently only a default
+  rule set for each resource is supported.`,
+
+	Annotations: map[string]string{},
+	Args: func(cmd *cobra.Command, args []string) error {
+		check := cobra.ExactArgs(2)
+		if cmd.Flags().Changed("json") {
+			check = cobra.ExactArgs(0)
+		}
+		return check(cmd, args)
+	},
+	PreRunE: root.MustAccountClient,
+	RunE: func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		a := root.AccountClient(ctx)
+		if cmd.Flags().Changed("json") {
+			err = getRuleSetJson.Unmarshal(&getRuleSetReq)
+			if err != nil {
+				return err
+			}
+		} else {
+			getRuleSetReq.Name = args[0]
+			getRuleSetReq.Etag = args[1]
+		}
+
+		response, err := a.AccessControl.GetRuleSet(ctx, getRuleSetReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
+}
+
+// start update-rule-set command
+
+var updateRuleSetReq iam.UpdateRuleSetRequest
+var updateRuleSetJson flags.JsonFlag
+
+func init() {
+	Cmd.AddCommand(updateRuleSetCmd)
+	// TODO: short flags
+	updateRuleSetCmd.Flags().Var(&updateRuleSetJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+}
+
+var updateRuleSetCmd = &cobra.Command{
+	Use:   "update-rule-set",
+	Short: `Update a rule set.`,
+	Long: `Update a rule set.
+
+  Replace the rules of a rule set. First, use get to read the current version of
+  the rule set before modifying it. This pattern helps prevent conflicts between
+  concurrent updates.`,
+
+	Annotations: map[string]string{},
+	PreRunE:     root.MustAccountClient,
+	RunE: func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		a := root.AccountClient(ctx)
+		if cmd.Flags().Changed("json") {
+			err = updateRuleSetJson.Unmarshal(&updateRuleSetReq)
+			if err != nil {
+				return err
+			}
+		} else {
+			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+		}
+
+		response, err := a.AccessControl.UpdateRuleSet(ctx, updateRuleSetReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
+}
+
+// end service AccountAccessControl
@ -4,6 +4,7 @@ package billable_usage
 
 import (
 	"github.com/databricks/cli/cmd/root"
+	"github.com/databricks/cli/libs/flags"
 	"github.com/databricks/databricks-sdk-go/service/billing"
 	"github.com/spf13/cobra"
 )
@ -13,15 +14,20 @@ var Cmd = &cobra.Command{
 	Short: `This API allows you to download billable usage logs for the specified account and date range.`,
 	Long: `This API allows you to download billable usage logs for the specified account
  and date range. This feature works with all account types.`,
+	Annotations: map[string]string{
+		"package": "billing",
+	},
 }
 
 // start download command
 
 var downloadReq billing.DownloadRequest
+var downloadJson flags.JsonFlag
 
 func init() {
 	Cmd.AddCommand(downloadCmd)
 	// TODO: short flags
+	downloadCmd.Flags().Var(&downloadJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
 	downloadCmd.Flags().BoolVar(&downloadReq.PersonalData, "personal-data", downloadReq.PersonalData, `Specify whether to include personally identifiable information in the billable usage logs, for example the email addresses of cluster creators.`)
 
@ -39,13 +45,26 @@ var downloadCmd = &cobra.Command{
 	[CSV file schema]: https://docs.databricks.com/administration-guide/account-settings/usage-analysis.html#schema`,
 
 	Annotations: map[string]string{},
-	Args: cobra.ExactArgs(2),
+	Args: func(cmd *cobra.Command, args []string) error {
+		check := cobra.ExactArgs(2)
+		if cmd.Flags().Changed("json") {
+			check = cobra.ExactArgs(0)
+		}
+		return check(cmd, args)
+	},
 	PreRunE: root.MustAccountClient,
 	RunE: func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		a := root.AccountClient(ctx)
+		if cmd.Flags().Changed("json") {
+			err = downloadJson.Unmarshal(&downloadReq)
+			if err != nil {
+				return err
+			}
+		} else {
 			downloadReq.StartMonth = args[0]
 			downloadReq.EndMonth = args[1]
+		}
 
 		err = a.BillableUsage.Download(ctx, downloadReq)
 		if err != nil {
@ -53,6 +72,9 @@ var downloadCmd = &cobra.Command{
 		}
 		return nil
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
}
 
 // end service BillableUsage
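
Aside: a standalone sketch (not part of this commit) of the argument-validation pattern these generated commands adopt: positional arguments are required unless --json supplies the request body, in which case none are accepted. The command name and printed output are hypothetical.

	package main

	import (
		"fmt"

		"github.com/spf13/cobra"
	)

	func main() {
		var jsonBody string
		cmd := &cobra.Command{
			Use: "download START_MONTH END_MONTH",
			Args: func(cmd *cobra.Command, args []string) error {
				check := cobra.ExactArgs(2)
				if cmd.Flags().Changed("json") {
					// With --json, positional arguments are rejected.
					check = cobra.ExactArgs(0)
				}
				return check(cmd, args)
			},
			RunE: func(cmd *cobra.Command, args []string) error {
				if cmd.Flags().Changed("json") {
					fmt.Println("request from JSON:", jsonBody)
					return nil
				}
				fmt.Println("request from args:", args[0], args[1])
				return nil
			},
		}
		cmd.Flags().StringVar(&jsonBody, "json", "", "inline JSON request body")
		cmd.Execute()
	}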
@@ -17,6 +17,12 @@ var Cmd = &cobra.Command{
 	Short: `These APIs manage budget configuration including notifications for exceeding a budget for a period.`,
 	Long: `These APIs manage budget configuration including notifications for exceeding a
 	budget for a period. They can also retrieve the status of each budget.`,
+	Annotations: map[string]string{
+		"package": "billing",
+	},
+
+	// This service is being previewed; hide from help output.
+	Hidden: true,
 }

 // start create command
@@ -43,15 +49,14 @@ var createCmd = &cobra.Command{
 	RunE: func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		a := root.AccountClient(ctx)
+		if cmd.Flags().Changed("json") {
 			err = createJson.Unmarshal(&createReq)
 			if err != nil {
 				return err
 			}
-		_, err = fmt.Sscan(args[0], &createReq.Budget)
-		if err != nil {
-			return fmt.Errorf("invalid BUDGET: %s", args[0])
+		} else {
+			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
 		}
-		createReq.BudgetId = args[1]

 		response, err := a.Budgets.Create(ctx, createReq)
 		if err != nil {
@@ -59,15 +64,20 @@ var createCmd = &cobra.Command{
 		}
 		return cmdio.Render(ctx, response)
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // start delete command

 var deleteReq billing.DeleteBudgetRequest
+var deleteJson flags.JsonFlag

 func init() {
 	Cmd.AddCommand(deleteCmd)
 	// TODO: short flags
+	deleteCmd.Flags().Var(&deleteJson, "json", `either inline JSON string or @path/to/file.json with request body`)

 }

@@ -83,11 +93,20 @@ var deleteCmd = &cobra.Command{
 	RunE: func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		a := root.AccountClient(ctx)
-		if len(args) == 0 {
-			names, err := a.Budgets.BudgetWithStatusNameToBudgetIdMap(ctx)
+		if cmd.Flags().Changed("json") {
+			err = deleteJson.Unmarshal(&deleteReq)
 			if err != nil {
 				return err
 			}
+		} else {
+			if len(args) == 0 {
+				promptSpinner := cmdio.Spinner(ctx)
+				promptSpinner <- "No BUDGET_ID argument specified. Loading names for Budgets drop-down."
+				names, err := a.Budgets.BudgetWithStatusNameToBudgetIdMap(ctx)
+				close(promptSpinner)
+				if err != nil {
+					return fmt.Errorf("failed to load names for Budgets drop-down. Please manually specify required arguments. Original error: %w", err)
+				}
 				id, err := cmdio.Select(ctx, names, "Budget ID")
 				if err != nil {
 					return err
@@ -98,6 +117,7 @@ var deleteCmd = &cobra.Command{
 				return fmt.Errorf("expected to have budget id")
 			}
 			deleteReq.BudgetId = args[0]
+		}

 		err = a.Budgets.Delete(ctx, deleteReq)
 		if err != nil {
@@ -105,15 +125,20 @@ var deleteCmd = &cobra.Command{
 		}
 		return nil
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // start get command

 var getReq billing.GetBudgetRequest
+var getJson flags.JsonFlag

 func init() {
 	Cmd.AddCommand(getCmd)
 	// TODO: short flags
+	getCmd.Flags().Var(&getJson, "json", `either inline JSON string or @path/to/file.json with request body`)

 }

@@ -130,11 +155,20 @@ var getCmd = &cobra.Command{
 	RunE: func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		a := root.AccountClient(ctx)
-		if len(args) == 0 {
-			names, err := a.Budgets.BudgetWithStatusNameToBudgetIdMap(ctx)
+		if cmd.Flags().Changed("json") {
+			err = getJson.Unmarshal(&getReq)
 			if err != nil {
 				return err
 			}
+		} else {
+			if len(args) == 0 {
+				promptSpinner := cmdio.Spinner(ctx)
+				promptSpinner <- "No BUDGET_ID argument specified. Loading names for Budgets drop-down."
+				names, err := a.Budgets.BudgetWithStatusNameToBudgetIdMap(ctx)
+				close(promptSpinner)
+				if err != nil {
+					return fmt.Errorf("failed to load names for Budgets drop-down. Please manually specify required arguments. Original error: %w", err)
+				}
 				id, err := cmdio.Select(ctx, names, "Budget ID")
 				if err != nil {
 					return err
@@ -145,6 +179,7 @@ var getCmd = &cobra.Command{
 				return fmt.Errorf("expected to have budget id")
 			}
 			getReq.BudgetId = args[0]
+		}

 		response, err := a.Budgets.Get(ctx, getReq)
 		if err != nil {
@@ -152,6 +187,9 @@ var getCmd = &cobra.Command{
 		}
 		return cmdio.Render(ctx, response)
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // start list command
@@ -180,6 +218,9 @@ var listCmd = &cobra.Command{
 		}
 		return cmdio.Render(ctx, response)
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // start update command
@@ -207,15 +248,14 @@ var updateCmd = &cobra.Command{
 	RunE: func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		a := root.AccountClient(ctx)
+		if cmd.Flags().Changed("json") {
 			err = updateJson.Unmarshal(&updateReq)
 			if err != nil {
 				return err
 			}
-		_, err = fmt.Sscan(args[0], &updateReq.Budget)
-		if err != nil {
-			return fmt.Errorf("invalid BUDGET: %s", args[0])
+		} else {
+			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
 		}
-		updateReq.BudgetId = args[1]

 		err = a.Budgets.Update(ctx, updateReq)
 		if err != nil {
@@ -223,6 +263,9 @@ var updateCmd = &cobra.Command{
 		}
 		return nil
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // end service Budgets
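The delete and get hunks above also introduce an interactive fallback: with no BUDGET_ID and no --json, the command shows a spinner while it loads a name-to-ID map, then offers a drop-down. The diff only reveals the shape of cmdio.Spinner, a channel that accepts status messages and is closed when the work finishes; the stand-in below mimics that shape with invented rendering, so treat it as a sketch rather than the library's implementation:

package main

import (
	"fmt"
	"time"
)

// spinner returns a send-only channel: callers push status text and close
// the channel to stop the animation, mirroring the usage in the diff.
func spinner() chan<- string {
	ch := make(chan string)
	go func() {
		frames := `|/-\`
		msg := ""
		for i := 0; ; i++ {
			select {
			case m, ok := <-ch:
				if !ok {
					fmt.Print("\r\033[K") // clear the spinner line
					return
				}
				msg = m
			case <-time.After(100 * time.Millisecond):
				fmt.Printf("\r%c %s", frames[i%len(frames)], msg)
			}
		}
	}()
	return ch
}

func main() {
	s := spinner()
	s <- "No BUDGET_ID argument specified. Loading names for Budgets drop-down."
	time.Sleep(300 * time.Millisecond) // stand-in for the API call
	names := map[string]string{"team budget": "1234"}
	close(s)
	for name, id := range names {
		fmt.Printf("%s -> %s\n", name, id)
	}
}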
@@ -6,6 +6,7 @@ import (
 	"github.com/databricks/cli/cmd/root"
 	"github.com/spf13/cobra"

+	account_access_control "github.com/databricks/cli/cmd/account/access-control"
 	billable_usage "github.com/databricks/cli/cmd/account/billable-usage"
 	budgets "github.com/databricks/cli/cmd/account/budgets"
 	credentials "github.com/databricks/cli/cmd/account/credentials"
@@ -20,7 +21,9 @@ import (
 	o_auth_enrollment "github.com/databricks/cli/cmd/account/o-auth-enrollment"
 	private_access "github.com/databricks/cli/cmd/account/private-access"
 	published_app_integration "github.com/databricks/cli/cmd/account/published-app-integration"
+	service_principal_secrets "github.com/databricks/cli/cmd/account/service-principal-secrets"
 	account_service_principals "github.com/databricks/cli/cmd/account/service-principals"
+	account_settings "github.com/databricks/cli/cmd/account/settings"
 	storage "github.com/databricks/cli/cmd/account/storage"
 	account_storage_credentials "github.com/databricks/cli/cmd/account/storage-credentials"
 	account_users "github.com/databricks/cli/cmd/account/users"
@@ -37,6 +40,7 @@ var accountCmd = &cobra.Command{
 func init() {
 	root.RootCmd.AddCommand(accountCmd)

+	accountCmd.AddCommand(account_access_control.Cmd)
 	accountCmd.AddCommand(billable_usage.Cmd)
 	accountCmd.AddCommand(budgets.Cmd)
 	accountCmd.AddCommand(credentials.Cmd)
@@ -51,11 +55,40 @@ func init() {
 	accountCmd.AddCommand(o_auth_enrollment.Cmd)
 	accountCmd.AddCommand(private_access.Cmd)
 	accountCmd.AddCommand(published_app_integration.Cmd)
+	accountCmd.AddCommand(service_principal_secrets.Cmd)
 	accountCmd.AddCommand(account_service_principals.Cmd)
+	accountCmd.AddCommand(account_settings.Cmd)
 	accountCmd.AddCommand(storage.Cmd)
 	accountCmd.AddCommand(account_storage_credentials.Cmd)
 	accountCmd.AddCommand(account_users.Cmd)
 	accountCmd.AddCommand(vpc_endpoints.Cmd)
 	accountCmd.AddCommand(workspace_assignment.Cmd)
 	accountCmd.AddCommand(workspaces.Cmd)
+
+	// Register commands with groups
+	account_access_control.Cmd.GroupID = "iam"
+	billable_usage.Cmd.GroupID = "billing"
+	budgets.Cmd.GroupID = "billing"
+	credentials.Cmd.GroupID = "provisioning"
+	custom_app_integration.Cmd.GroupID = "oauth2"
+	encryption_keys.Cmd.GroupID = "provisioning"
+	account_groups.Cmd.GroupID = "iam"
+	account_ip_access_lists.Cmd.GroupID = "settings"
+	log_delivery.Cmd.GroupID = "billing"
+	account_metastore_assignments.Cmd.GroupID = "catalog"
+	account_metastores.Cmd.GroupID = "catalog"
+	networks.Cmd.GroupID = "provisioning"
+	o_auth_enrollment.Cmd.GroupID = "oauth2"
+	private_access.Cmd.GroupID = "provisioning"
+	published_app_integration.Cmd.GroupID = "oauth2"
+	service_principal_secrets.Cmd.GroupID = "oauth2"
+	account_service_principals.Cmd.GroupID = "iam"
+	account_settings.Cmd.GroupID = "settings"
+	storage.Cmd.GroupID = "provisioning"
+	account_storage_credentials.Cmd.GroupID = "catalog"
+	account_users.Cmd.GroupID = "iam"
+	vpc_endpoints.Cmd.GroupID = "provisioning"
+	workspace_assignment.Cmd.GroupID = "iam"
+	workspaces.Cmd.GroupID = "provisioning"
+
 }
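The block of GroupID assignments above uses cobra's built-in command grouping (cobra v1.6+): the parent registers cobra.Group values and each subcommand opts in by ID, which is what lets help output cluster subcommands under headings like Billing or Identity and Access Management. A minimal sketch with hypothetical subcommands:

package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{Use: "databricks"}

	// Groups must exist on the parent before Execute runs; cobra panics
	// at startup if a subcommand references an unregistered group ID.
	root.AddGroup(
		&cobra.Group{ID: "billing", Title: "Billing"},
		&cobra.Group{ID: "iam", Title: "Identity and Access Management"},
	)

	budgets := &cobra.Command{Use: "budgets", Short: "Manage budgets"}
	budgets.GroupID = "billing"

	users := &cobra.Command{Use: "users", Short: "Manage users"}
	users.GroupID = "iam"

	root.AddCommand(budgets, users)
	_ = root.Execute()
}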
@@ -20,6 +20,9 @@ var Cmd = &cobra.Command{
 	Databricks can deploy clusters in the appropriate VPC for the new workspace. A
 	credential configuration encapsulates this role information, and its ID is
 	used when creating a new workspace.`,
+	Annotations: map[string]string{
+		"package": "provisioning",
+	},
 }

 // start create command
@@ -59,14 +62,13 @@ var createCmd = &cobra.Command{
 	RunE: func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		a := root.AccountClient(ctx)
+		if cmd.Flags().Changed("json") {
 			err = createJson.Unmarshal(&createReq)
 			if err != nil {
 				return err
 			}
-		createReq.CredentialsName = args[0]
-		_, err = fmt.Sscan(args[1], &createReq.AwsCredentials)
-		if err != nil {
-			return fmt.Errorf("invalid AWS_CREDENTIALS: %s", args[1])
+		} else {
+			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
 		}

 		response, err := a.Credentials.Create(ctx, createReq)
@@ -75,15 +77,20 @@ var createCmd = &cobra.Command{
 		}
 		return cmdio.Render(ctx, response)
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // start delete command

 var deleteReq provisioning.DeleteCredentialRequest
+var deleteJson flags.JsonFlag

 func init() {
 	Cmd.AddCommand(deleteCmd)
 	// TODO: short flags
+	deleteCmd.Flags().Var(&deleteJson, "json", `either inline JSON string or @path/to/file.json with request body`)

 }

@@ -101,11 +108,20 @@ var deleteCmd = &cobra.Command{
 	RunE: func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		a := root.AccountClient(ctx)
-		if len(args) == 0 {
-			names, err := a.Credentials.CredentialCredentialsNameToCredentialsIdMap(ctx)
+		if cmd.Flags().Changed("json") {
+			err = deleteJson.Unmarshal(&deleteReq)
 			if err != nil {
 				return err
 			}
+		} else {
+			if len(args) == 0 {
+				promptSpinner := cmdio.Spinner(ctx)
+				promptSpinner <- "No CREDENTIALS_ID argument specified. Loading names for Credentials drop-down."
+				names, err := a.Credentials.CredentialCredentialsNameToCredentialsIdMap(ctx)
+				close(promptSpinner)
+				if err != nil {
+					return fmt.Errorf("failed to load names for Credentials drop-down. Please manually specify required arguments. Original error: %w", err)
+				}
 				id, err := cmdio.Select(ctx, names, "Databricks Account API credential configuration ID")
 				if err != nil {
 					return err
@@ -116,6 +132,7 @@ var deleteCmd = &cobra.Command{
 				return fmt.Errorf("expected to have databricks account api credential configuration id")
 			}
 			deleteReq.CredentialsId = args[0]
+		}

 		err = a.Credentials.Delete(ctx, deleteReq)
 		if err != nil {
@@ -123,15 +140,20 @@ var deleteCmd = &cobra.Command{
 		}
 		return nil
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // start get command

 var getReq provisioning.GetCredentialRequest
+var getJson flags.JsonFlag

 func init() {
 	Cmd.AddCommand(getCmd)
 	// TODO: short flags
+	getCmd.Flags().Var(&getJson, "json", `either inline JSON string or @path/to/file.json with request body`)

 }

@@ -148,11 +170,20 @@ var getCmd = &cobra.Command{
 	RunE: func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		a := root.AccountClient(ctx)
-		if len(args) == 0 {
-			names, err := a.Credentials.CredentialCredentialsNameToCredentialsIdMap(ctx)
+		if cmd.Flags().Changed("json") {
+			err = getJson.Unmarshal(&getReq)
 			if err != nil {
 				return err
 			}
+		} else {
+			if len(args) == 0 {
+				promptSpinner := cmdio.Spinner(ctx)
+				promptSpinner <- "No CREDENTIALS_ID argument specified. Loading names for Credentials drop-down."
+				names, err := a.Credentials.CredentialCredentialsNameToCredentialsIdMap(ctx)
+				close(promptSpinner)
+				if err != nil {
+					return fmt.Errorf("failed to load names for Credentials drop-down. Please manually specify required arguments. Original error: %w", err)
+				}
 				id, err := cmdio.Select(ctx, names, "Databricks Account API credential configuration ID")
 				if err != nil {
 					return err
@@ -163,6 +194,7 @@ var getCmd = &cobra.Command{
 				return fmt.Errorf("expected to have databricks account api credential configuration id")
 			}
 			getReq.CredentialsId = args[0]
+		}

 		response, err := a.Credentials.Get(ctx, getReq)
 		if err != nil {
@@ -170,6 +202,9 @@ var getCmd = &cobra.Command{
 		}
 		return cmdio.Render(ctx, response)
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // start list command
@@ -198,6 +233,9 @@ var listCmd = &cobra.Command{
 		}
 		return cmdio.Render(ctx, response)
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // end service Credentials
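The removed fmt.Sscan calls explain why the create commands in this diff now insist on --json: Sscan can only scan space-separated primitives (or types implementing fmt.Scanner), so a nested value like AwsCredentials can never be reconstructed from a single positional argument. A small demonstration, with an illustrative struct shape:

package main

import (
	"encoding/json"
	"fmt"
)

// awsCredentials is an illustrative stand-in for the SDK's request field.
type awsCredentials struct {
	StsRole struct {
		RoleArn string `json:"role_arn"`
	} `json:"sts_role"`
}

func main() {
	body := `{"sts_role":{"role_arn":"arn:aws:iam::123456789012:role/cross-account"}}`
	var creds awsCredentials

	// fmt.Sscan has no notion of struct fields, so scanning a nested value
	// out of one argument fails with a "can't scan type" error.
	if _, err := fmt.Sscan(body, &creds); err != nil {
		fmt.Println("Sscan:", err)
	}

	// The same body decodes cleanly as JSON, which is the path the
	// generator now routes all complex request inputs through.
	if err := json.Unmarshal([]byte(body), &creds); err == nil {
		fmt.Println("json:", creds.StsRole.RoleArn)
	}
}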
@@ -22,6 +22,9 @@ var Cmd = &cobra.Command{
 	**Note:** You can only add/use the OAuth custom application integrations when
 	OAuth enrollment status is enabled. For more details see
 	:method:OAuthEnrollment/create`,
+	Annotations: map[string]string{
+		"package": "oauth2",
+	},
 }

 // start create command
@@ -46,21 +49,21 @@ var createCmd = &cobra.Command{

 	Create Custom OAuth App Integration.

-	You can retrieve the custom oauth app integration via :method:get.`,
+	You can retrieve the custom oauth app integration via
+	:method:CustomAppIntegration/get.`,

 	Annotations: map[string]string{},
 	PreRunE: root.MustAccountClient,
 	RunE: func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		a := root.AccountClient(ctx)
+		if cmd.Flags().Changed("json") {
 			err = createJson.Unmarshal(&createReq)
 			if err != nil {
 				return err
 			}
-		createReq.Name = args[0]
-		_, err = fmt.Sscan(args[1], &createReq.RedirectUrls)
-		if err != nil {
-			return fmt.Errorf("invalid REDIRECT_URLS: %s", args[1])
+		} else {
+			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
 		}

 		response, err := a.CustomAppIntegration.Create(ctx, createReq)
@@ -69,15 +72,20 @@ var createCmd = &cobra.Command{
 		}
 		return cmdio.Render(ctx, response)
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // start delete command

 var deleteReq oauth2.DeleteCustomAppIntegrationRequest
+var deleteJson flags.JsonFlag

 func init() {
 	Cmd.AddCommand(deleteCmd)
 	// TODO: short flags
+	deleteCmd.Flags().Var(&deleteJson, "json", `either inline JSON string or @path/to/file.json with request body`)

 }

@@ -87,15 +95,28 @@ var deleteCmd = &cobra.Command{
 	Long: `Delete Custom OAuth App Integration.

 	Delete an existing Custom OAuth App Integration. You can retrieve the custom
-	oauth app integration via :method:get.`,
+	oauth app integration via :method:CustomAppIntegration/get.`,

 	Annotations: map[string]string{},
-	Args: cobra.ExactArgs(1),
+	Args: func(cmd *cobra.Command, args []string) error {
+		check := cobra.ExactArgs(1)
+		if cmd.Flags().Changed("json") {
+			check = cobra.ExactArgs(0)
+		}
+		return check(cmd, args)
+	},
 	PreRunE: root.MustAccountClient,
 	RunE: func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		a := root.AccountClient(ctx)
+		if cmd.Flags().Changed("json") {
+			err = deleteJson.Unmarshal(&deleteReq)
+			if err != nil {
+				return err
+			}
+		} else {
 			deleteReq.IntegrationId = args[0]
+		}

 		err = a.CustomAppIntegration.Delete(ctx, deleteReq)
 		if err != nil {
@@ -103,15 +124,20 @@ var deleteCmd = &cobra.Command{
 		}
 		return nil
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // start get command

 var getReq oauth2.GetCustomAppIntegrationRequest
+var getJson flags.JsonFlag

 func init() {
 	Cmd.AddCommand(getCmd)
 	// TODO: short flags
+	getCmd.Flags().Var(&getJson, "json", `either inline JSON string or @path/to/file.json with request body`)

 }

@@ -123,12 +149,25 @@ var getCmd = &cobra.Command{
 	Gets the Custom OAuth App Integration for the given integration id.`,

 	Annotations: map[string]string{},
-	Args: cobra.ExactArgs(1),
+	Args: func(cmd *cobra.Command, args []string) error {
+		check := cobra.ExactArgs(1)
+		if cmd.Flags().Changed("json") {
+			check = cobra.ExactArgs(0)
+		}
+		return check(cmd, args)
+	},
 	PreRunE: root.MustAccountClient,
 	RunE: func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		a := root.AccountClient(ctx)
+		if cmd.Flags().Changed("json") {
+			err = getJson.Unmarshal(&getReq)
+			if err != nil {
+				return err
+			}
+		} else {
 			getReq.IntegrationId = args[0]
+		}

 		response, err := a.CustomAppIntegration.Get(ctx, getReq)
 		if err != nil {
@@ -136,6 +175,9 @@ var getCmd = &cobra.Command{
 		}
 		return cmdio.Render(ctx, response)
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // start list command
@@ -151,7 +193,7 @@ var listCmd = &cobra.Command{
 	Long: `Get custom oauth app integrations.

 	Get the list of custom oauth app integrations for the specified Databricks
-	Account`,
+	account`,

 	Annotations: map[string]string{},
 	PreRunE: root.MustAccountClient,
@@ -164,6 +206,9 @@ var listCmd = &cobra.Command{
 		}
 		return cmdio.Render(ctx, response)
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // start update command
@@ -182,23 +227,33 @@ func init() {
 }

 var updateCmd = &cobra.Command{
-	Use:   "update",
+	Use:   "update INTEGRATION_ID",
 	Short: `Updates Custom OAuth App Integration.`,
 	Long: `Updates Custom OAuth App Integration.

 	Updates an existing custom OAuth App Integration. You can retrieve the custom
-	oauth app integration via :method:get.`,
+	oauth app integration via :method:CustomAppIntegration/get.`,

 	Annotations: map[string]string{},
+	Args: func(cmd *cobra.Command, args []string) error {
+		check := cobra.ExactArgs(1)
+		if cmd.Flags().Changed("json") {
+			check = cobra.ExactArgs(0)
+		}
+		return check(cmd, args)
+	},
 	PreRunE: root.MustAccountClient,
 	RunE: func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		a := root.AccountClient(ctx)
+		if cmd.Flags().Changed("json") {
 			err = updateJson.Unmarshal(&updateReq)
 			if err != nil {
 				return err
 			}
+		} else {
 			updateReq.IntegrationId = args[0]
+		}

 		err = a.CustomAppIntegration.Update(ctx, updateReq)
 		if err != nil {
@@ -206,6 +261,9 @@ var updateCmd = &cobra.Command{
 		}
 		return nil
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // end service CustomAppIntegration
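Each command above that accepts a --json body also swaps its static cobra.ExactArgs(1) validator for a closure that drops the arity to zero when the flag is set, since the integration ID can then ride inside the JSON body. The pattern distilled into a standalone, compilable form:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "delete INTEGRATION_ID",
		// cobra.PositionalArgs values are plain functions, so the
		// validator can be chosen per invocation: one positional
		// argument normally, none when --json supplies the body.
		Args: func(cmd *cobra.Command, args []string) error {
			check := cobra.ExactArgs(1)
			if cmd.Flags().Changed("json") {
				check = cobra.ExactArgs(0)
			}
			return check(cmd, args)
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("validated args:", args)
			return nil
		},
	}
	cmd.Flags().String("json", "", "request body as JSON")
	_ = cmd.Execute()
}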
@@ -31,6 +31,9 @@ var Cmd = &cobra.Command{
 	encryption requires that the workspace is on the E2 version of the platform.
 	If you have an older workspace, it might not be on the E2 version of the
 	platform. If you are not sure, contact your Databricks representative.`,
+	Annotations: map[string]string{
+		"package": "provisioning",
+	},
 }

 // start create command
@@ -43,6 +46,9 @@ func init() {
 	// TODO: short flags
 	createCmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)

+	// TODO: complex arg: aws_key_info
+	// TODO: complex arg: gcp_key_info

 }

 var createCmd = &cobra.Command{
@@ -61,7 +67,8 @@ var createCmd = &cobra.Command{
 	EBS volume data.

 	**Important**: Customer-managed keys are supported only for some deployment
-	types, subscription types, and AWS regions.
+	types, subscription types, and AWS regions that currently support creation of
+	Databricks workspaces.

 	This operation is available only if your account is on the E2 version of the
 	platform or on a select custom plan that allows multiple workspaces per
@@ -72,17 +79,13 @@ var createCmd = &cobra.Command{
 	RunE: func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		a := root.AccountClient(ctx)
+		if cmd.Flags().Changed("json") {
 			err = createJson.Unmarshal(&createReq)
 			if err != nil {
 				return err
 			}
-		_, err = fmt.Sscan(args[0], &createReq.AwsKeyInfo)
-		if err != nil {
-			return fmt.Errorf("invalid AWS_KEY_INFO: %s", args[0])
-		}
-		_, err = fmt.Sscan(args[1], &createReq.UseCases)
-		if err != nil {
-			return fmt.Errorf("invalid USE_CASES: %s", args[1])
+		} else {
+			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
 		}

 		response, err := a.EncryptionKeys.Create(ctx, createReq)
@@ -91,15 +94,20 @@ var createCmd = &cobra.Command{
 		}
 		return cmdio.Render(ctx, response)
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // start delete command

 var deleteReq provisioning.DeleteEncryptionKeyRequest
+var deleteJson flags.JsonFlag

 func init() {
 	Cmd.AddCommand(deleteCmd)
 	// TODO: short flags
+	deleteCmd.Flags().Var(&deleteJson, "json", `either inline JSON string or @path/to/file.json with request body`)

 }

@@ -112,25 +120,25 @@ var deleteCmd = &cobra.Command{
 	delete a configuration that is associated with a running workspace.`,

 	Annotations: map[string]string{},
+	Args: func(cmd *cobra.Command, args []string) error {
+		check := cobra.ExactArgs(1)
+		if cmd.Flags().Changed("json") {
+			check = cobra.ExactArgs(0)
+		}
+		return check(cmd, args)
+	},
 	PreRunE: root.MustAccountClient,
 	RunE: func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		a := root.AccountClient(ctx)
-		if len(args) == 0 {
-			names, err := a.EncryptionKeys.CustomerManagedKeyAwsKeyInfoKeyArnToCustomerManagedKeyIdMap(ctx)
+		if cmd.Flags().Changed("json") {
+			err = deleteJson.Unmarshal(&deleteReq)
 			if err != nil {
 				return err
 			}
-			id, err := cmdio.Select(ctx, names, "Databricks encryption key configuration ID")
-			if err != nil {
-				return err
-			}
-			args = append(args, id)
-		}
-		if len(args) != 1 {
-			return fmt.Errorf("expected to have databricks encryption key configuration id")
-		}
+		} else {
 			deleteReq.CustomerManagedKeyId = args[0]
+		}

 		err = a.EncryptionKeys.Delete(ctx, deleteReq)
 		if err != nil {
@@ -138,15 +146,20 @@ var deleteCmd = &cobra.Command{
 		}
 		return nil
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // start get command

 var getReq provisioning.GetEncryptionKeyRequest
+var getJson flags.JsonFlag

 func init() {
 	Cmd.AddCommand(getCmd)
 	// TODO: short flags
+	getCmd.Flags().Var(&getJson, "json", `either inline JSON string or @path/to/file.json with request body`)

 }

@@ -169,28 +182,28 @@ var getCmd = &cobra.Command{
 	types, subscription types, and AWS regions.

 	This operation is available only if your account is on the E2 version of the
-	platform.`,
+	platform.",`,

 	Annotations: map[string]string{},
+	Args: func(cmd *cobra.Command, args []string) error {
+		check := cobra.ExactArgs(1)
+		if cmd.Flags().Changed("json") {
+			check = cobra.ExactArgs(0)
+		}
+		return check(cmd, args)
+	},
 	PreRunE: root.MustAccountClient,
 	RunE: func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		a := root.AccountClient(ctx)
-		if len(args) == 0 {
-			names, err := a.EncryptionKeys.CustomerManagedKeyAwsKeyInfoKeyArnToCustomerManagedKeyIdMap(ctx)
+		if cmd.Flags().Changed("json") {
+			err = getJson.Unmarshal(&getReq)
 			if err != nil {
 				return err
 			}
-			id, err := cmdio.Select(ctx, names, "Databricks encryption key configuration ID")
-			if err != nil {
-				return err
-			}
-			args = append(args, id)
-		}
-		if len(args) != 1 {
-			return fmt.Errorf("expected to have databricks encryption key configuration id")
-		}
+		} else {
 			getReq.CustomerManagedKeyId = args[0]
+		}

 		response, err := a.EncryptionKeys.Get(ctx, getReq)
 		if err != nil {
@@ -198,6 +211,9 @@ var getCmd = &cobra.Command{
 		}
 		return cmdio.Render(ctx, response)
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // start list command
@@ -237,6 +253,9 @@ var listCmd = &cobra.Command{
 		}
 		return cmdio.Render(ctx, response)
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }

 // end service EncryptionKeys
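The other change stamped onto every command literal above is the ValidArgsFunction. Without one, cobra's generated shell-completion scripts fall back to filename completion, which is meaningless for opaque identifiers such as a customer-managed key ID; cobra.NoFileCompletions returns no candidates and suppresses that fallback. In isolation:

package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{
		Use: "get CUSTOMER_MANAGED_KEY_ID",
		// NoFileCompletions yields no suggestions and sets the directive
		// that tells the shell not to complete filenames either.
		ValidArgsFunction: cobra.NoFileCompletions,
		RunE: func(cmd *cobra.Command, args []string) error {
			return nil
		},
	}
	_ = cmd.Execute()
}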
@@ -0,0 +1,42 @@
+package account
+
+import "github.com/spf13/cobra"
+
+// Groups returns an ordered list of command groups.
+// The order matches the order used in the Databricks API explorer.
+func Groups() []cobra.Group {
+	return []cobra.Group{
+		{
+			ID:    "iam",
+			Title: "Identity and Access Management",
+		},
+		{
+			ID:    "catalog",
+			Title: "Unity Catalog",
+		},
+		{
+			ID:    "settings",
+			Title: "Settings",
+		},
+		{
+			ID:    "provisioning",
+			Title: "Provisioning",
+		},
+		{
+			ID:    "billing",
+			Title: "Billing",
+		},
+		{
+			ID:    "oauth2",
+			Title: "OAuth",
+		},
+	}
+}
+
+func init() {
+	// Register groups with parent command
+	groups := Groups()
+	for i := range groups {
+		accountCmd.AddGroup(&groups[i])
+	}
+}
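A small detail in this new file worth flagging: the init loop registers &groups[i] rather than the address of a range variable. Under Go versions before 1.22 the range variable is one reused location, so &g would hand cobra the same pointer for every group; indexing the backing slice is correct on any version. An isolated illustration:

package main

import "fmt"

type group struct{ ID, Title string }

func main() {
	groups := []group{
		{"iam", "Identity and Access Management"},
		{"billing", "Billing"},
	}

	// Safe on every Go version: each pointer refers to a distinct
	// element of the backing array, matching the pattern above.
	var ptrs []*group
	for i := range groups {
		ptrs = append(ptrs, &groups[i])
	}
	for _, p := range ptrs {
		fmt.Println(p.ID, "->", p.Title)
	}
}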