mirror of https://github.com/databricks/cli.git

commit d6fe2b81ca
Merge branch 'main' into jobs-deserialization-test
@@ -1 +1 @@
-a7a9dc025bb80303e676bf3708942c6aa06689f1
+7437dabb9dadee402c1fc060df4c1ce8cc5369f0
@@ -7,7 +7,7 @@ package account
 import (
 	"github.com/databricks/cli/cmd/root"
 	"github.com/spf13/cobra"
-{{range .Services}}{{if .IsAccounts}}{{if not (in $excludes .KebabName) }}
+{{range .Services}}{{if and .IsAccounts (not .HasParent) (not .IsDataPlane)}}{{if not (in $excludes .KebabName) }}
 	{{.SnakeName}} "github.com/databricks/cli/cmd/account/{{(.TrimPrefix "account").KebabName}}"{{end}}{{end}}{{end}}
 )

@@ -17,7 +17,7 @@ func New() *cobra.Command {
 		Short: `Databricks Account Commands`,
 	}

-	{{range .Services}}{{if .IsAccounts}}{{if not (in $excludes .KebabName) -}}
+	{{range .Services}}{{if and .IsAccounts (not .HasParent) (not .IsDataPlane)}}{{if not (in $excludes .KebabName) -}}
 	cmd.AddCommand({{.SnakeName}}.New())
 	{{end}}{{end}}{{end}}

@@ -14,14 +14,14 @@ package workspace

 import (
 	"github.com/databricks/cli/cmd/root"
-{{range .Services}}{{if not .IsAccounts}}{{if not (in $excludes .KebabName) }}
+{{range .Services}}{{if and (not .IsAccounts) (not .HasParent) (not .IsDataPlane)}}{{if not (in $excludes .KebabName) }}
 	{{.SnakeName}} "github.com/databricks/cli/cmd/workspace/{{.KebabName}}"{{end}}{{end}}{{end}}
 )

 func All() []*cobra.Command {
 	var out []*cobra.Command

-	{{range .Services}}{{if not .IsAccounts}}{{if not (in $excludes .KebabName) -}}
+	{{range .Services}}{{if and (not .IsAccounts) (not .HasParent) (not .IsDataPlane)}}{{if not (in $excludes .KebabName) -}}
 	out = append(out, {{.SnakeName}}.New())
 	{{end}}{{end}}{{end}}

@@ -18,6 +18,11 @@ package variable
 	"warehouses"
 }}

+{{ $customField :=
+	dict
+		"service-principals" "ApplicationId"
+}}
+
 import (
 	"context"
 	"fmt"
@@ -116,15 +121,10 @@ func allResolvers() *resolvers {
 			return "", err
 		}

-		return fmt.Sprint(entity{{ template "field-path" .List.NamedIdMap.IdPath }}), nil
+		return fmt.Sprint(entity.{{ getOrDefault $customField .KebabName ((index .List.NamedIdMap.IdPath 0).PascalName) }}), nil
 	}
 	{{end -}}
 	{{- end}}

 	return r
 }
-
-{{- define "field-path" -}}
-	{{- range .}}.{{.PascalName}}{{end}}
-{{- end -}}
-
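Note: the new `$customField` lookup leans on a `dict`/`getOrDefault` helper pair in the generator's template function map (Sprig provides `dict`; `getOrDefault` appears to be a custom helper). A minimal self-contained sketch of that pattern, with assumed helper implementations:

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		// dict builds a map from alternating key/value arguments.
		"dict": func(kv ...string) map[string]string {
			m := map[string]string{}
			for i := 0; i+1 < len(kv); i += 2 {
				m[kv[i]] = kv[i+1]
			}
			return m
		},
		// getOrDefault returns the value for key, or the fallback if absent.
		"getOrDefault": func(m map[string]string, key, fallback string) string {
			if v, ok := m[key]; ok {
				return v
			}
			return fallback
		},
	}

	tmpl := template.Must(template.New("t").Funcs(funcs).Parse(
		`{{ $customField := dict "service-principals" "ApplicationId" }}` +
			`{{ getOrDefault $customField .KebabName .DefaultField }}`))

	// "service-principals" hits the override; any other service falls back.
	data := struct{ KebabName, DefaultField string }{"service-principals", "Id"}
	_ = tmpl.Execute(os.Stdout, data) // prints "ApplicationId"
}
```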
@@ -8,6 +8,10 @@ import (
 	"github.com/databricks/cli/cmd/root"
 	"github.com/databricks/databricks-sdk-go/service/{{.Package.Name}}"
 	"github.com/spf13/cobra"
+
+{{range .Subservices -}}
+	{{.SnakeName}} "github.com/databricks/cli/cmd/{{ if .ParentService.IsAccounts }}account{{ else }}workspace{{ end }}/{{.KebabName}}"
+{{end}}
 )

 {{ $excludes :=
@@ -18,6 +22,7 @@ import (
 	"dbsql-permissions"
 	"account-access-control-proxy"
 	"files"
+	"serving-endpoints-data-plane"
 }}

 {{if not (in $excludes .KebabName) }}
@@ -34,6 +39,9 @@ import (
 ]{{end}}{{end}}

 {{define "service"}}
+{{- $excludeMethods := list "put-secret" -}}
+{{- $hideService := .IsPrivatePreview }}
+
 // Slice with functions to override default command behavior.
 // Functions can be added from the `init()` function in manually curated files in this directory.
 var cmdOverrides []func(*cobra.Command)
@@ -45,17 +53,36 @@ func New() *cobra.Command {
 		Short: `{{.Summary | without "`"}}`,
 		Long: `{{.Comment " " 80 | without "`"}}`,
 		{{- end }}
+		{{- if not .HasParent }}
 		GroupID: "{{ .Package.Name }}",
 		Annotations: map[string]string{
 			"package": "{{ .Package.Name }}",
 		},
-		{{- if .IsPrivatePreview }}
+		{{- end }}
+		{{- if $hideService }}

 		// This service is being previewed; hide from help output.
 		Hidden: true,
 		{{- end }}
 	}

+	{{ if gt (len .Methods) 0 -}}
+	// Add methods
+	{{- range .Methods}}
+		{{- if in $excludeMethods .KebabName }}
+			{{- continue}}
+		{{- end}}
+	cmd.AddCommand(new{{.PascalName}}())
+	{{- end}}
+	{{- end}}
+
+	{{ if .HasSubservices }}
+	// Add subservices
+	{{- range .Subservices}}
+	cmd.AddCommand({{.SnakeName}}.New())
+	{{- end}}
+	{{- end}}
+
 	// Apply optional overrides to this command.
 	for _, fn := range cmdOverrides {
 		fn(cmd)
@@ -67,8 +94,7 @@ func New() *cobra.Command {
 {{- $serviceName := .KebabName -}}
 {{range .Methods}}

-{{- $excludes := list "put-secret" -}}
-{{if in $excludes .KebabName }}
+{{if in $excludeMethods .KebabName }}
 {{continue}}
 {{end}}
 // start {{.KebabName}} command
@@ -121,7 +147,14 @@ func new{{.PascalName}}() *cobra.Command {
 	{{- end}}
 {{end}}

-{{- $excludeFromPrompts := list "workspace get-status" -}}
+{{- $excludeFromPrompts := list
+	"workspace get-status"
+	"provider-exchanges get"
+	"provider-exchanges delete"
+	"provider-exchanges delete-listing-from-exchange"
+	"provider-exchanges list-exchanges-for-listing"
+	"provider-exchanges list-listings-for-exchange"
+	-}}
 {{- $fullCommandName := (print $serviceName " " .KebabName) -}}
 {{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}

@@ -159,7 +192,8 @@ func new{{.PascalName}}() *cobra.Command {
 	{{- end -}}
 	`
 	{{- end }}
-	{{- if .IsPrivatePreview }}
+	{{/* Don't hide commands if the service itself is already hidden. */}}
+	{{- if and (not $hideService) .IsPrivatePreview }}

 	// This command is being previewed; hide from help output.
 	cmd.Hidden = true
@@ -170,7 +204,7 @@ func new{{.PascalName}}() *cobra.Command {
 	cmd.Args = func(cmd *cobra.Command, args []string) error {
 		{{- if $hasDifferentArgsWithJsonFlag }}
 		if cmd.Flags().Changed("json") {
-			err := cobra.ExactArgs({{len .Request.RequiredPathFields}})(cmd, args)
+			err := root.ExactArgs({{len .Request.RequiredPathFields}})(cmd, args)
 			if err != nil {
 				{{- if eq 0 (len .Request.RequiredPathFields) }}
 				return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide{{- range $index, $field := .Request.RequiredFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input")
@@ -182,7 +216,7 @@ func new{{.PascalName}}() *cobra.Command {
 		}
 		{{- end }}
 		{{- if $hasRequiredArgs }}
-		check := cobra.ExactArgs({{len .RequiredPositionalArguments}})
+		check := root.ExactArgs({{len .RequiredPositionalArguments}})
 		return check(cmd, args)
 		{{- else}}
 		return nil
@@ -242,7 +276,7 @@ func new{{.PascalName}}() *cobra.Command {
 			return err
 		}
 		if {{.CamelName}}SkipWait {
-			{{if .Response -}}
+			{{if not .Response.IsEmpty -}}
 			return cmdio.Render(ctx, wait.Response)
 			{{- else -}}
 			return nil
@@ -291,25 +325,34 @@ func new{{.PascalName}}() *cobra.Command {
 	return cmd
 }

-func init() {
-	cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
-		cmd.AddCommand(new{{.PascalName}}())
-	})
-}
 {{end}}
 // end service {{.Name}}{{end}}

 {{- define "method-call" -}}
-{{if .Response}}response, err :={{else}}err ={{end}} {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.PascalName}}{{if .Pagination}}All{{end}}(ctx{{if .Request}}, {{.CamelName}}Req{{end}})
+{{if not .Response.IsEmpty -}}
+	response{{ if not .Pagination}}, err{{end}} :=
+{{- else -}}
+	err =
+{{- end}}
+{{- if .Service.IsAccounts}}a{{else}}w{{end}}.
+	{{- if .Service.HasParent }}
+		{{- (.Service.ParentService.TrimPrefix "account").PascalName }}.
+		{{- (.Service.TrimPrefix "account").PascalName}}().
+	{{- else}}
+		{{- (.Service.TrimPrefix "account").PascalName}}.
+	{{- end}}
+	{{- .PascalName}}(ctx{{if .Request}}, {{.CamelName}}Req{{end}})
+{{- if not (and .Response .Pagination) }}
 if err != nil {
 	return err
 }
-{{ if .Response -}}
+{{- end}}
+{{ if not .Response.IsEmpty -}}
 	{{- if .IsResponseByteStream -}}
 	defer response.{{.ResponseBodyField.PascalName}}.Close()
-	return cmdio.RenderReader(ctx, response.{{.ResponseBodyField.PascalName}})
+	return cmdio.Render{{ if .Pagination}}Iterator{{end}}(ctx, response.{{.ResponseBodyField.PascalName}})
 	{{- else -}}
-	return cmdio.Render(ctx, response)
+	return cmdio.Render{{ if .Pagination}}Iterator{{end}}(ctx, response)
 	{{- end -}}
 {{ else -}}
 	return nil
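Note: the `$hideService` changes above come down to a simple cobra behavior: a command with `Hidden: true` is omitted from help output, and re-hiding methods of an already-hidden service is redundant (which is why command hiding is now gated on `and (not $hideService) .IsPrivatePreview`). A minimal sketch of the generated shape; the names are illustrative, not the generator's actual output:

```go
package main

import "github.com/spf13/cobra"

func main() {
	// A service in private preview: the whole command group is hidden.
	svc := &cobra.Command{
		Use:    "my-preview-service",
		Short:  "Commands for a private-preview service",
		Hidden: true, // corresponds to: {{- if $hideService }} Hidden: true {{- end }}
	}

	// Individual preview methods are only hidden when the service itself is
	// visible, mirroring {{- if and (not $hideService) .IsPrivatePreview }}.
	get := &cobra.Command{Use: "get", Run: func(*cobra.Command, []string) {}}
	svc.AddCommand(get)

	root := &cobra.Command{Use: "databricks"}
	root.AddCommand(svc)
	_ = root.Execute()
}
```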
@@ -4,8 +4,10 @@ cmd/account/billable-usage/billable-usage.go linguist-generated=true
 cmd/account/budgets/budgets.go linguist-generated=true
 cmd/account/cmd.go linguist-generated=true
 cmd/account/credentials/credentials.go linguist-generated=true
+cmd/account/csp-enablement-account/csp-enablement-account.go linguist-generated=true
 cmd/account/custom-app-integration/custom-app-integration.go linguist-generated=true
 cmd/account/encryption-keys/encryption-keys.go linguist-generated=true
+cmd/account/esm-enablement-account/esm-enablement-account.go linguist-generated=true
 cmd/account/groups/groups.go linguist-generated=true
 cmd/account/ip-access-lists/ip-access-lists.go linguist-generated=true
 cmd/account/log-delivery/log-delivery.go linguist-generated=true
@@ -14,6 +16,7 @@ cmd/account/metastores/metastores.go linguist-generated=true
 cmd/account/network-connectivity/network-connectivity.go linguist-generated=true
 cmd/account/networks/networks.go linguist-generated=true
 cmd/account/o-auth-published-apps/o-auth-published-apps.go linguist-generated=true
+cmd/account/personal-compute/personal-compute.go linguist-generated=true
 cmd/account/private-access/private-access.go linguist-generated=true
 cmd/account/published-app-integration/published-app-integration.go linguist-generated=true
 cmd/account/service-principal-secrets/service-principal-secrets.go linguist-generated=true
@@ -28,17 +31,26 @@ cmd/account/workspaces/workspaces.go linguist-generated=true
 cmd/workspace/alerts/alerts.go linguist-generated=true
 cmd/workspace/apps/apps.go linguist-generated=true
 cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true
+cmd/workspace/automatic-cluster-update/automatic-cluster-update.go linguist-generated=true
 cmd/workspace/catalogs/catalogs.go linguist-generated=true
 cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true
 cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true
 cmd/workspace/clusters/clusters.go linguist-generated=true
 cmd/workspace/cmd.go linguist-generated=true
+cmd/workspace/compliance-security-profile/compliance-security-profile.go linguist-generated=true
 cmd/workspace/connections/connections.go linguist-generated=true
+cmd/workspace/consumer-fulfillments/consumer-fulfillments.go linguist-generated=true
+cmd/workspace/consumer-installations/consumer-installations.go linguist-generated=true
+cmd/workspace/consumer-listings/consumer-listings.go linguist-generated=true
+cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go linguist-generated=true
+cmd/workspace/consumer-providers/consumer-providers.go linguist-generated=true
 cmd/workspace/credentials-manager/credentials-manager.go linguist-generated=true
 cmd/workspace/current-user/current-user.go linguist-generated=true
 cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true
 cmd/workspace/dashboards/dashboards.go linguist-generated=true
 cmd/workspace/data-sources/data-sources.go linguist-generated=true
+cmd/workspace/default-namespace/default-namespace.go linguist-generated=true
+cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go linguist-generated=true
 cmd/workspace/experiments/experiments.go linguist-generated=true
 cmd/workspace/external-locations/external-locations.go linguist-generated=true
 cmd/workspace/functions/functions.go linguist-generated=true
@@ -55,10 +67,20 @@ cmd/workspace/libraries/libraries.go linguist-generated=true
 cmd/workspace/metastores/metastores.go linguist-generated=true
 cmd/workspace/model-registry/model-registry.go linguist-generated=true
 cmd/workspace/model-versions/model-versions.go linguist-generated=true
+cmd/workspace/online-tables/online-tables.go linguist-generated=true
+cmd/workspace/permission-migration/permission-migration.go linguist-generated=true
 cmd/workspace/permissions/permissions.go linguist-generated=true
 cmd/workspace/pipelines/pipelines.go linguist-generated=true
 cmd/workspace/policy-families/policy-families.go linguist-generated=true
+cmd/workspace/provider-exchange-filters/provider-exchange-filters.go linguist-generated=true
+cmd/workspace/provider-exchanges/provider-exchanges.go linguist-generated=true
+cmd/workspace/provider-files/provider-files.go linguist-generated=true
+cmd/workspace/provider-listings/provider-listings.go linguist-generated=true
+cmd/workspace/provider-personalization-requests/provider-personalization-requests.go linguist-generated=true
+cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go linguist-generated=true
+cmd/workspace/provider-providers/provider-providers.go linguist-generated=true
 cmd/workspace/providers/providers.go linguist-generated=true
+cmd/workspace/quality-monitors/quality-monitors.go linguist-generated=true
 cmd/workspace/queries/queries.go linguist-generated=true
 cmd/workspace/query-history/query-history.go linguist-generated=true
 cmd/workspace/query-visualizations/query-visualizations.go linguist-generated=true
@@ -66,6 +88,7 @@ cmd/workspace/recipient-activation/recipient-activation.go linguist-generated=true
 cmd/workspace/recipients/recipients.go linguist-generated=true
 cmd/workspace/registered-models/registered-models.go linguist-generated=true
 cmd/workspace/repos/repos.go linguist-generated=true
+cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go linguist-generated=true
 cmd/workspace/schemas/schemas.go linguist-generated=true
 cmd/workspace/secrets/secrets.go linguist-generated=true
 cmd/workspace/service-principals/service-principals.go linguist-generated=true
@@ -0,0 +1,16 @@
+name: publish-winget
+
+on:
+  workflow_dispatch:
+
+jobs:
+  publish-to-winget-pkgs:
+    runs-on: windows-latest
+    environment: release
+    steps:
+      - uses: vedantmgoyal2009/winget-releaser@93fd8b606a1672ec3e5c6c3bb19426be68d1a8b0 # https://github.com/vedantmgoyal2009/winget-releaser/releases/tag/v2
+        with:
+          identifier: Databricks.DatabricksCLI
+          installers-regex: 'windows_.*-signed\.zip$' # Only signed Windows releases
+          token: ${{ secrets.ENG_DEV_ECOSYSTEM_BOT_TOKEN }}
+          fork-user: eng-dev-ecosystem-bot
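Note: the `installers-regex` above only matches artifacts whose names end in `-signed.zip`, unlike the old pattern in release.yml further down. A quick Go check of the pattern against plausible artifact names (the file names are made up for illustration):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as installers-regex in the workflow above.
	re := regexp.MustCompile(`windows_.*-signed\.zip$`)

	for _, name := range []string{
		"databricks_cli_0.223.2_windows_amd64-signed.zip", // matches
		"databricks_cli_0.223.2_windows_amd64.zip",        // unsigned: skipped
		"databricks_cli_0.223.2_linux_amd64.zip",          // wrong OS: skipped
	} {
		fmt.Println(name, re.MatchString(name))
	}
}
```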
@@ -33,10 +33,10 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.21.x
+          go-version: 1.22.x

       - name: Setup Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: '3.9'

@@ -56,7 +56,7 @@ jobs:
         run: make test

       - name: Publish test coverage
-        uses: codecov/codecov-action@v3
+        uses: codecov/codecov-action@v4

   fmt:
     runs-on: ubuntu-latest

@@ -68,7 +68,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.21.x
+          go-version: 1.22.x

           # No need to download cached dependencies when running gofmt.
           cache: false

@@ -89,3 +89,29 @@ jobs:
         run: |
           # Exit with status code 1 if there are differences (i.e. unformatted files)
           git diff --exit-code
+
+  validate-bundle-schema:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Setup Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: 1.22.x
+
+      # Github repo: https://github.com/ajv-validator/ajv-cli
+      - name: Install ajv-cli
+        run: npm install -g ajv-cli@5.0.0
+
+      # Assert that the generated bundle schema is a valid JSON schema by using
+      # ajv-cli to validate it against a sample configuration file.
+      # By default the ajv-cli runs in strict mode which will fail if the schema
+      # itself is not valid. Strict mode is more strict than the JSON schema
+      # specification. See for details: https://ajv.js.org/options.html#strict-mode-options
+      - name: Validate bundle schema
+        run: |
+          go run main.go bundle schema > schema.json
+          ajv -s schema.json -d ./bundle/tests/basic/databricks.yml
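Note: the new job pipes the generated schema through ajv in strict mode. As a much weaker stand-in for that check, a Go sketch that only asserts the emitted `schema.json` is well-formed JSON with an object root (an illustration of the intent, not a replacement for ajv's validation):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	// Read the schema emitted by `go run main.go bundle schema > schema.json`.
	raw, err := os.ReadFile("schema.json")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Cheapest possible assertion: the output parses and the root is an
	// object. ajv's strict mode checks far more than this.
	var schema map[string]any
	if err := json.Unmarshal(raw, &schema); err != nil {
		fmt.Fprintln(os.Stderr, "schema.json is not valid JSON:", err)
		os.Exit(1)
	}
	fmt.Printf("schema has %d top-level keys\n", len(schema))
}
```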
@@ -21,33 +21,41 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.21.x
+          go-version: 1.22.x

+          # The default cache key for this action considers only the `go.sum` file.
+          # We include .goreleaser.yaml here to differentiate from the cache used by the push action
+          # that runs unit tests. This job produces and uses a different cache.
+          cache-dependency-path: |
+            go.sum
+            .goreleaser.yaml
+
       - name: Hide snapshot tag to outsmart GoReleaser
         run: git tag -d snapshot || true

       - name: Run GoReleaser
-        uses: goreleaser/goreleaser-action@v4
+        id: releaser
+        uses: goreleaser/goreleaser-action@v6
         with:
-          version: latest
-          args: release --snapshot
+          version: ~> v2
+          args: release --snapshot --skip docker

       - name: Upload macOS binaries
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: cli_darwin_snapshot
           path: |
             dist/*_darwin_*/

       - name: Upload Linux binaries
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: cli_linux_snapshot
           path: |
             dist/*_linux_*/

       - name: Upload Windows binaries
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: cli_windows_snapshot
           path: |
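Note: the `cache-dependency-path` comment describes a cache keyed on the hash of several files. actions/setup-go computes this key internally; the Go sketch below is only an illustration of the idea, under the assumption that the key is a hash over the listed files:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"os"
)

func main() {
	// Hash go.sum and .goreleaser.yaml together: if either file changes,
	// the key changes and a fresh dependency cache is produced, keeping
	// the release jobs' cache separate from the unit-test job's cache.
	h := sha256.New()
	for _, path := range []string{"go.sum", ".goreleaser.yaml"} {
		data, err := os.ReadFile(path)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		h.Write(data)
	}
	fmt.Printf("setup-go-%x\n", h.Sum(nil))
}
```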
@@ -22,13 +22,33 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.21.x
+          go-version: 1.22.x

+          # The default cache key for this action considers only the `go.sum` file.
+          # We include .goreleaser.yaml here to differentiate from the cache used by the push action
+          # that runs unit tests. This job produces and uses a different cache.
+          cache-dependency-path: |
+            go.sum
+            .goreleaser.yaml
+
+      # Log into the GitHub Container Registry. The goreleaser action will create
+      # the docker images and push them to the GitHub Container Registry.
+      - uses: "docker/login-action@v3"
+        with:
+          registry: "ghcr.io"
+          username: "${{ github.actor }}"
+          password: "${{ secrets.GITHUB_TOKEN }}"
+
+      # QEMU is required to build cross platform docker images using buildx.
+      # It allows virtualization of the CPU architecture at the application level.
+      - name: Set up QEMU dependency
+        uses: docker/setup-qemu-action@v3
+
       - name: Run GoReleaser
         id: releaser
-        uses: goreleaser/goreleaser-action@v4
+        uses: goreleaser/goreleaser-action@v6
         with:
-          version: latest
+          version: ~> v2
           args: release
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -71,7 +91,7 @@ jobs:
         with:
           github-token: ${{ secrets.DECO_GITHUB_TOKEN }}
           script: |
-            let artifacts = JSON.parse('${{ needs.goreleaser.outputs.artifacts }}')
+            let artifacts = ${{ needs.goreleaser.outputs.artifacts }}
             artifacts = artifacts.filter(a => a.type == "Archive")
             artifacts = new Map(
               artifacts.map(a => [
@@ -117,14 +137,3 @@ jobs:
             version: "${{ env.VERSION }}",
           }
         });
-
-  publish-to-winget-pkgs:
-    needs: goreleaser
-    runs-on: windows-latest
-    environment: release
-    steps:
-      - uses: vedantmgoyal2009/winget-releaser@93fd8b606a1672ec3e5c6c3bb19426be68d1a8b0 # https://github.com/vedantmgoyal2009/winget-releaser/releases/tag/v2
-        with:
-          identifier: Databricks.DatabricksCLI
-          installers-regex: 'windows_.*\.zip$' # Only windows releases
-          token: ${{ secrets.ENG_DEV_ECOSYSTEM_BOT_TOKEN }}
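Note: dropping `JSON.parse` works because the goreleaser action's `artifacts` output is itself a JSON document, so it can be spliced into the script as a literal. The same filter-by-type step, sketched in Go against a trimmed-down artifact shape (the field set is assumed from the workflow's usage, not goreleaser's full metadata):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Subset of goreleaser's artifact metadata used by the workflow.
type artifact struct {
	Name string `json:"name"`
	Type string `json:"type"`
}

func main() {
	// Stand-in for ${{ needs.goreleaser.outputs.artifacts }}.
	raw := `[
		{"name": "databricks_cli_0.223.2_linux_amd64.zip", "type": "Archive"},
		{"name": "databricks_cli_0.223.2_SHA256SUMS", "type": "Checksum"}
	]`

	var artifacts []artifact
	if err := json.Unmarshal([]byte(raw), &artifacts); err != nil {
		panic(err)
	}

	// Equivalent of: artifacts = artifacts.filter(a => a.type == "Archive")
	var archives []artifact
	for _, a := range artifacts {
		if a.Type == "Archive" {
			archives = append(archives, a)
		}
	}
	fmt.Println(archives)
}
```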
@@ -1,6 +1,9 @@
+version: 2
+
 before:
   hooks:
-    - go mod tidy
+    - go mod download

 builds:
   - env:
       - CGO_ENABLED=0
@@ -36,6 +39,7 @@ builds:
       - amd64
       - arm64
     binary: databricks

 archives:
   - format: zip

@@ -45,11 +49,54 @@ archives:
     # file name then additional logic to clean up older builds would be needed.
     name_template: 'databricks_cli_{{ if not .IsSnapshot }}{{ .Version }}_{{ end }}{{ .Os }}_{{ .Arch }}'

+dockers:
+  - id: arm64
+    goarch: arm64
+    # We need to use buildx to build arm64 image on a amd64 machine.
+    use: buildx
+    image_templates:
+      # Docker tags can't have "+" in them, so we replace it with "-"
+      - 'ghcr.io/databricks/cli:{{replace .Version "+" "-"}}-arm64'
+      - 'ghcr.io/databricks/cli:latest-arm64'
+    build_flag_templates:
+      - "--build-arg=ARCH=arm64"
+      - "--platform=linux/arm64"
+    extra_files:
+      - "./docker/config.tfrc"
+      - "./docker/setup.sh"
+
+  - id: amd64
+    goarch: amd64
+    use: buildx
+    image_templates:
+      # Docker tags can't have "+" in them, so we replace it with "-"
+      - 'ghcr.io/databricks/cli:{{replace .Version "+" "-"}}-amd64'
+      - 'ghcr.io/databricks/cli:latest-amd64'
+    build_flag_templates:
+      - "--build-arg=ARCH=amd64"
+      - "--platform=linux/amd64"
+    extra_files:
+      - "./docker/config.tfrc"
+      - "./docker/setup.sh"
+
+docker_manifests:
+  - name_template: ghcr.io/databricks/cli:{{replace .Version "+" "-"}}
+    image_templates:
+      - ghcr.io/databricks/cli:{{replace .Version "+" "-"}}-amd64
+      - ghcr.io/databricks/cli:{{replace .Version "+" "-"}}-arm64
+  - name_template: ghcr.io/databricks/cli:latest
+    image_templates:
+      - ghcr.io/databricks/cli:latest-amd64
+      - ghcr.io/databricks/cli:latest-arm64
+
 checksum:
   name_template: 'databricks_cli_{{ .Version }}_SHA256SUMS'
   algorithm: sha256

 snapshot:
   name_template: '{{ incpatch .Version }}-dev+{{ .ShortCommit }}'

 changelog:
   sort: asc
   filters:
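Note: the repeated `{{replace .Version "+" "-"}}` exists because snapshot versions carry semver build metadata (e.g. `0.223.3-dev+a1b2c3d`, per `snapshot.name_template` above), and `+` is not a legal character in a Docker tag. The transformation itself is trivial; the version string below is illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Snapshot version as produced by snapshot.name_template above.
	version := "0.223.3-dev+a1b2c3d"

	// Docker tags can't contain "+", so swap it for "-" just like the
	// goreleaser image_templates do.
	tag := strings.ReplaceAll(version, "+", "-")
	fmt.Println("ghcr.io/databricks/cli:" + tag) // ghcr.io/databricks/cli:0.223.3-dev-a1b2c3d
}
```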
@@ -0,0 +1,10 @@
+with-expecter: true
+filename: "mock_{{.InterfaceName | snakecase}}.go"
+mockname: "Mock{{.InterfaceName}}"
+outpkg: "mock{{.PackageName}}"
+packages:
+  github.com/databricks/cli/libs/filer:
+    interfaces:
+      Filer:
+        config:
+          dir: "internal/mocks/libs/filer"
CHANGELOG.md
521
CHANGELOG.md
|
@ -1,5 +1,526 @@
|
||||||
# Version changelog
|
# Version changelog
|
||||||
|
|
||||||
|
## 0.223.2
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
* Override complex variables with target overrides instead of merging ([#1567](https://github.com/databricks/cli/pull/1567)).
|
||||||
|
* Rewrite local path for libraries in foreach tasks ([#1569](https://github.com/databricks/cli/pull/1569)).
|
||||||
|
* Change SetVariables mutator to mutate dynamic configuration instead ([#1573](https://github.com/databricks/cli/pull/1573)).
|
||||||
|
* Return early in bundle destroy if no deployment exists ([#1581](https://github.com/databricks/cli/pull/1581)).
|
||||||
|
* Let notebook detection code use underlying metadata if available ([#1574](https://github.com/databricks/cli/pull/1574)).
|
||||||
|
* Remove schema override for variable default value ([#1536](https://github.com/databricks/cli/pull/1536)).
|
||||||
|
* Print diagnostics in 'bundle deploy' ([#1579](https://github.com/databricks/cli/pull/1579)).
|
||||||
|
|
||||||
|
Internal:
|
||||||
|
* Update actions/upload-artifact to v4 ([#1559](https://github.com/databricks/cli/pull/1559)).
|
||||||
|
* Use Go 1.22 to build and test ([#1562](https://github.com/databricks/cli/pull/1562)).
|
||||||
|
* Move bespoke status call to main workspace files filer ([#1570](https://github.com/databricks/cli/pull/1570)).
|
||||||
|
* Add new template ([#1578](https://github.com/databricks/cli/pull/1578)).
|
||||||
|
* Add regression tests for CLI error output ([#1566](https://github.com/databricks/cli/pull/1566)).
|
||||||
|
|
||||||
|
Dependency updates:
|
||||||
|
* Bump golang.org/x/mod from 0.18.0 to 0.19.0 ([#1576](https://github.com/databricks/cli/pull/1576)).
|
||||||
|
* Bump golang.org/x/term from 0.21.0 to 0.22.0 ([#1577](https://github.com/databricks/cli/pull/1577)).
|
||||||
|
|
||||||
|
## 0.223.1
|
||||||
|
|
||||||
|
This bugfix release fixes missing error messages in v0.223.0.
|
||||||
|
|
||||||
|
CLI:
|
||||||
|
* Fix logic error in [#1532](https://github.com/databricks/cli/pull/1532) ([#1564](https://github.com/databricks/cli/pull/1564)).
|
||||||
|
|
||||||
|
|
||||||
|
## 0.223.0
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
|
||||||
|
As of this release you can interact with bundles when running the CLI on DBR (e.g. via the Web Terminal).
|
||||||
|
|
||||||
|
* Fix non-default project names not working in dbt-sql template ([#1500](https://github.com/databricks/cli/pull/1500)).
|
||||||
|
* Improve `bundle validate` output ([#1532](https://github.com/databricks/cli/pull/1532)).
|
||||||
|
* Fixed resolving variable references inside slice variable ([#1550](https://github.com/databricks/cli/pull/1550)).
|
||||||
|
* Fixed bundle not loading when empty variable is defined ([#1552](https://github.com/databricks/cli/pull/1552)).
|
||||||
|
* Use `vfs.Path` for filesystem interaction ([#1554](https://github.com/databricks/cli/pull/1554)).
|
||||||
|
* Replace `vfs.Path` with extension-aware filer when running on DBR ([#1556](https://github.com/databricks/cli/pull/1556)).
|
||||||
|
|
||||||
|
Internal:
|
||||||
|
* merge.Override: Fix handling of dyn.NilValue ([#1530](https://github.com/databricks/cli/pull/1530)).
|
||||||
|
* Compare `.Kind()` instead of direct equality checks on a `dyn.Value` ([#1520](https://github.com/databricks/cli/pull/1520)).
|
||||||
|
* PythonMutator: register product in user agent extra ([#1533](https://github.com/databricks/cli/pull/1533)).
|
||||||
|
* Ignore `dyn.NilValue` when traversing value from `dyn.Map` ([#1547](https://github.com/databricks/cli/pull/1547)).
|
||||||
|
* Add extra tests for the sync block ([#1548](https://github.com/databricks/cli/pull/1548)).
|
||||||
|
* PythonMutator: add diagnostics ([#1531](https://github.com/databricks/cli/pull/1531)).
|
||||||
|
* PythonMutator: support omitempty in PyDABs ([#1513](https://github.com/databricks/cli/pull/1513)).
|
||||||
|
* PythonMutator: allow insert 'resources' and 'resources.jobs' ([#1555](https://github.com/databricks/cli/pull/1555)).
|
||||||
|
|
||||||
|
## 0.222.0
|
||||||
|
|
||||||
|
CLI:
|
||||||
|
* Add link to documentation for Homebrew installation to README ([#1505](https://github.com/databricks/cli/pull/1505)).
|
||||||
|
* Fix `databricks configure` to use `DATABRICKS_CONFIG_FILE` environment variable if exists as config file ([#1325](https://github.com/databricks/cli/pull/1325)).
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
|
||||||
|
The Terraform upgrade to v1.48.0 includes a fix for library order not being respected.
|
||||||
|
|
||||||
|
* Fix conditional in query in `default-sql` template ([#1479](https://github.com/databricks/cli/pull/1479)).
|
||||||
|
* Remove user credentials specified in the Git origin URL ([#1494](https://github.com/databricks/cli/pull/1494)).
|
||||||
|
* Serialize dynamic value for `bundle validate` output ([#1499](https://github.com/databricks/cli/pull/1499)).
|
||||||
|
* Override variables with lookup value even if values has default value set ([#1504](https://github.com/databricks/cli/pull/1504)).
|
||||||
|
* Pause quality monitors when "mode: development" is used ([#1481](https://github.com/databricks/cli/pull/1481)).
|
||||||
|
* Return `fs.ModeDir` for Git folders in the workspace ([#1521](https://github.com/databricks/cli/pull/1521)).
|
||||||
|
* Upgrade TF provider to 1.48.0 ([#1527](https://github.com/databricks/cli/pull/1527)).
|
||||||
|
* Added support for complex variables ([#1467](https://github.com/databricks/cli/pull/1467)).
|
||||||
|
|
||||||
|
Internal:
|
||||||
|
* Add randIntn function ([#1475](https://github.com/databricks/cli/pull/1475)).
|
||||||
|
* Avoid multiple file tree traversals on bundle deploy ([#1493](https://github.com/databricks/cli/pull/1493)).
|
||||||
|
* Clean up unused code ([#1502](https://github.com/databricks/cli/pull/1502)).
|
||||||
|
* Use `dyn.InvalidValue` to indicate absence ([#1507](https://github.com/databricks/cli/pull/1507)).
|
||||||
|
* Add ApplyPythonMutator ([#1430](https://github.com/databricks/cli/pull/1430)).
|
||||||
|
* Set bool pointer to disable lock ([#1516](https://github.com/databricks/cli/pull/1516)).
|
||||||
|
* Allow the any type to be set to nil in `convert.FromTyped` ([#1518](https://github.com/databricks/cli/pull/1518)).
|
||||||
|
* Properly deal with nil values in `convert.FromTyped` ([#1511](https://github.com/databricks/cli/pull/1511)).
|
||||||
|
* Return `dyn.InvalidValue` instead of `dyn.NilValue` when errors happen ([#1514](https://github.com/databricks/cli/pull/1514)).
|
||||||
|
* PythonMutator: replace stdin/stdout with files ([#1512](https://github.com/databricks/cli/pull/1512)).
|
||||||
|
* Add context type and value to path rewriting ([#1525](https://github.com/databricks/cli/pull/1525)).
|
||||||
|
|
||||||
|
API Changes:
|
||||||
|
* Added schedule CRUD commands to `databricks lakeview`.
|
||||||
|
* Added subscription CRUD commands to `databricks lakeview`.
|
||||||
|
* Added `databricks apps start` command.
|
||||||
|
|
||||||
|
OpenAPI commit 7437dabb9dadee402c1fc060df4c1ce8cc5369f0 (2024-06-24)
|
||||||
|
|
||||||
|
Dependency updates:
|
||||||
|
* Bump golang.org/x/text from 0.15.0 to 0.16.0 ([#1482](https://github.com/databricks/cli/pull/1482)).
|
||||||
|
* Bump golang.org/x/term from 0.20.0 to 0.21.0 ([#1483](https://github.com/databricks/cli/pull/1483)).
|
||||||
|
* Bump golang.org/x/mod from 0.17.0 to 0.18.0 ([#1484](https://github.com/databricks/cli/pull/1484)).
|
||||||
|
* Bump golang.org/x/oauth2 from 0.20.0 to 0.21.0 ([#1485](https://github.com/databricks/cli/pull/1485)).
|
||||||
|
* Bump github.com/briandowns/spinner from 1.23.0 to 1.23.1 ([#1495](https://github.com/databricks/cli/pull/1495)).
|
||||||
|
* Bump github.com/spf13/cobra from 1.8.0 to 1.8.1 ([#1496](https://github.com/databricks/cli/pull/1496)).
|
||||||
|
* Bump github.com/databricks/databricks-sdk-go from 0.42.0 to 0.43.0 ([#1522](https://github.com/databricks/cli/pull/1522)).
|
||||||
|
|
||||||
|
## 0.221.1
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
|
||||||
|
This releases fixes an issue introduced in v0.221.0 where managing jobs with a single-node cluster would fail.
|
||||||
|
|
||||||
|
* Fix SQL schema selection in default-sql template ([#1471](https://github.com/databricks/cli/pull/1471)).
|
||||||
|
* Copy-editing for SQL templates ([#1474](https://github.com/databricks/cli/pull/1474)).
|
||||||
|
* Upgrade TF provider to 1.47.0 ([#1476](https://github.com/databricks/cli/pull/1476)).
|
||||||
|
|
||||||
|
Internal:
|
||||||
|
* Use latest version of goreleaser action ([#1477](https://github.com/databricks/cli/pull/1477)).
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## 0.221.0
|
||||||
|
|
||||||
|
CLI:
|
||||||
|
* Update OpenAPI spec ([#1466](https://github.com/databricks/cli/pull/1466)).
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
* Upgrade TF provider to 1.46.0 ([#1460](https://github.com/databricks/cli/pull/1460)).
|
||||||
|
* Add support for Lakehouse monitoring ([#1307](https://github.com/databricks/cli/pull/1307)).
|
||||||
|
* Make dbt-sql and default-sql templates public ([#1463](https://github.com/databricks/cli/pull/1463)).
|
||||||
|
|
||||||
|
Internal:
|
||||||
|
* Abstract over filesystem interaction with libs/vfs ([#1452](https://github.com/databricks/cli/pull/1452)).
|
||||||
|
* Add `filer.Filer` to read notebooks from WSFS without omitting their extension ([#1457](https://github.com/databricks/cli/pull/1457)).
|
||||||
|
* Fix listing notebooks in a subdirectory ([#1468](https://github.com/databricks/cli/pull/1468)).
|
||||||
|
|
||||||
|
API Changes:
|
||||||
|
* Changed `databricks account storage-credentials list` command to return .
|
||||||
|
* Added `databricks consumer-listings batch-get` command.
|
||||||
|
* Added `databricks consumer-providers batch-get` command.
|
||||||
|
* Removed `databricks apps create-deployment` command.
|
||||||
|
* Added `databricks apps deploy` command.
|
||||||
|
|
||||||
|
OpenAPI commit 37b925eba37dfb3d7e05b6ba2d458454ce62d3a0 (2024-06-03)
|
||||||
|
|
||||||
|
Dependency updates:
|
||||||
|
* Bump github.com/hashicorp/go-version from 1.6.0 to 1.7.0 ([#1454](https://github.com/databricks/cli/pull/1454)).
|
||||||
|
* Bump github.com/hashicorp/hc-install from 0.6.4 to 0.7.0 ([#1453](https://github.com/databricks/cli/pull/1453)).
|
||||||
|
|
||||||
|
## 0.220.0
|
||||||
|
|
||||||
|
CLI:
|
||||||
|
* Add line about Docker installation to README.md ([#1363](https://github.com/databricks/cli/pull/1363)).
|
||||||
|
* Improve token refresh flow ([#1434](https://github.com/databricks/cli/pull/1434)).
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
* Upgrade Terraform provider to v1.42.0 ([#1418](https://github.com/databricks/cli/pull/1418)).
|
||||||
|
* Upgrade Terraform provider to v1.43.0 ([#1429](https://github.com/databricks/cli/pull/1429)).
|
||||||
|
* Don't merge-in remote resources during deployments ([#1432](https://github.com/databricks/cli/pull/1432)).
|
||||||
|
* Remove dependency on `ConfigFilePath` from path translation mutator ([#1437](https://github.com/databricks/cli/pull/1437)).
|
||||||
|
* Add `merge.Override` transform ([#1428](https://github.com/databricks/cli/pull/1428)).
|
||||||
|
* Fixed panic when loading incorrectly defined jobs ([#1402](https://github.com/databricks/cli/pull/1402)).
|
||||||
|
* Add more tests for `merge.Override` ([#1439](https://github.com/databricks/cli/pull/1439)).
|
||||||
|
* Fixed seg fault when specifying environment key for tasks ([#1443](https://github.com/databricks/cli/pull/1443)).
|
||||||
|
* Fix conversion of zero valued scalar pointers to a dynamic value ([#1433](https://github.com/databricks/cli/pull/1433)).
|
||||||
|
|
||||||
|
Internal:
|
||||||
|
* Don't hide commands of services that are already hidden ([#1438](https://github.com/databricks/cli/pull/1438)).
|
||||||
|
|
||||||
|
API Changes:
|
||||||
|
* Renamed `lakehouse-monitors` command group to `quality-monitors`.
|
||||||
|
* Added `apps` command group.
|
||||||
|
* Renamed `csp-enablement` command group to `compliance-security-profile`.
|
||||||
|
* Renamed `esm-enablement` command group to `enhanced-security-monitoring`.
|
||||||
|
* Added `databricks vector-search-indexes scan-index` command.
|
||||||
|
|
||||||
|
OpenAPI commit 7eb5ad9a2ed3e3f1055968a2d1014ac92c06fe92 (2024-05-21)
|
||||||
|
|
||||||
|
Dependency updates:
|
||||||
|
* Bump golang.org/x/text from 0.14.0 to 0.15.0 ([#1419](https://github.com/databricks/cli/pull/1419)).
|
||||||
|
* Bump golang.org/x/oauth2 from 0.19.0 to 0.20.0 ([#1421](https://github.com/databricks/cli/pull/1421)).
|
||||||
|
* Bump golang.org/x/term from 0.19.0 to 0.20.0 ([#1422](https://github.com/databricks/cli/pull/1422)).
|
||||||
|
* Bump github.com/databricks/databricks-sdk-go from 0.39.0 to 0.40.1 ([#1431](https://github.com/databricks/cli/pull/1431)).
|
||||||
|
* Bump github.com/fatih/color from 1.16.0 to 1.17.0 ([#1441](https://github.com/databricks/cli/pull/1441)).
|
||||||
|
* Bump github.com/hashicorp/terraform-json from 0.21.0 to 0.22.1 ([#1440](https://github.com/databricks/cli/pull/1440)).
|
||||||
|
* Bump github.com/hashicorp/terraform-exec from 0.20.0 to 0.21.0 ([#1442](https://github.com/databricks/cli/pull/1442)).
|
||||||
|
* Update Go SDK to v0.41.0 ([#1445](https://github.com/databricks/cli/pull/1445)).
|
||||||
|
|
||||||
|
## 0.219.0
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
* Don't fail while parsing outdated terraform state ([#1404](https://github.com/databricks/cli/pull/1404)).
|
||||||
|
* Annotate DLT pipelines when deployed using DABs ([#1410](https://github.com/databricks/cli/pull/1410)).
|
||||||
|
|
||||||
|
|
||||||
|
API Changes:
|
||||||
|
* Changed `databricks libraries cluster-status` command. New request type is compute.ClusterStatus.
|
||||||
|
* Changed `databricks libraries cluster-status` command to return .
|
||||||
|
* Added `databricks serving-endpoints get-open-api` command.
|
||||||
|
|
||||||
|
OpenAPI commit 21f9f1482f9d0d15228da59f2cd9f0863d2a6d55 (2024-04-23)
|
||||||
|
Dependency updates:
|
||||||
|
* Bump github.com/databricks/databricks-sdk-go from 0.38.0 to 0.39.0 ([#1405](https://github.com/databricks/cli/pull/1405)).
|
||||||
|
|
||||||
|
## 0.218.1
|
||||||
|
|
||||||
|
This is a bugfix release.
|
||||||
|
|
||||||
|
CLI:
|
||||||
|
* Pass `DATABRICKS_CONFIG_FILE` for `auth profiles` ([#1394](https://github.com/databricks/cli/pull/1394)).
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
* Show a better error message for using wheel tasks with older DBR versions ([#1373](https://github.com/databricks/cli/pull/1373)).
|
||||||
|
* Allow variable references in non-string fields in the JSON schema ([#1398](https://github.com/databricks/cli/pull/1398)).
|
||||||
|
* Fix variable overrides in targets for non-string variables ([#1397](https://github.com/databricks/cli/pull/1397)).
|
||||||
|
* Fix bundle schema for variables ([#1396](https://github.com/databricks/cli/pull/1396)).
|
||||||
|
* Fix bundle documentation URL ([#1399](https://github.com/databricks/cli/pull/1399)).
|
||||||
|
|
||||||
|
Internal:
|
||||||
|
* Removed autogenerated docs for the CLI commands ([#1392](https://github.com/databricks/cli/pull/1392)).
|
||||||
|
* Remove `JSON.parse` call from homebrew-tap action ([#1393](https://github.com/databricks/cli/pull/1393)).
|
||||||
|
* Ensure that Python dependencies are installed during upgrade ([#1390](https://github.com/databricks/cli/pull/1390)).
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## 0.218.0
|
||||||
|
|
||||||
|
This release marks the general availability of Databricks Asset Bundles.
|
||||||
|
|
||||||
|
CLI:
|
||||||
|
* Publish Docker images ([#1353](https://github.com/databricks/cli/pull/1353)).
|
||||||
|
* Add support for multi-arch Docker images ([#1362](https://github.com/databricks/cli/pull/1362)).
|
||||||
|
* Do not prefill https:// in prompt for Databricks Host ([#1364](https://github.com/databricks/cli/pull/1364)).
|
||||||
|
* Add better documentation for the `auth login` command ([#1366](https://github.com/databricks/cli/pull/1366)).
|
||||||
|
* Add URLs for authentication documentation to the auth command help ([#1365](https://github.com/databricks/cli/pull/1365)).
|
||||||
|
|
||||||
|
Bundles:
|
||||||
|
* Fix compute override for foreach tasks ([#1357](https://github.com/databricks/cli/pull/1357)).
|
||||||
|
* Transform artifact files source patterns in build not upload stage ([#1359](https://github.com/databricks/cli/pull/1359)).
|
||||||
|
* Convert between integer and float in normalization ([#1371](https://github.com/databricks/cli/pull/1371)).
|
||||||
|
* Disable locking for development mode ([#1302](https://github.com/databricks/cli/pull/1302)).
|
||||||
|
* Resolve variable references inside variable lookup fields ([#1368](https://github.com/databricks/cli/pull/1368)).
|
||||||
|
* Added validate mutator to surface additional bundle warnings ([#1352](https://github.com/databricks/cli/pull/1352)).
|
||||||
|
* Upgrade terraform-provider-databricks to 1.40.0 ([#1376](https://github.com/databricks/cli/pull/1376)).
|
||||||
|
* Print host in `bundle validate` when passed via profile or environment variables ([#1378](https://github.com/databricks/cli/pull/1378)).
|
||||||
|
* Cleanup remote file path on bundle destroy ([#1374](https://github.com/databricks/cli/pull/1374)).
|
||||||
|
* Add docs URL for `run_as` in error message ([#1381](https://github.com/databricks/cli/pull/1381)).
|
||||||
|
* Enable job queueing by default ([#1385](https://github.com/databricks/cli/pull/1385)).
|
||||||
|
* Added support for job environments ([#1379](https://github.com/databricks/cli/pull/1379)).
|
||||||
|
* Processing and completion of positional args to bundle run ([#1120](https://github.com/databricks/cli/pull/1120)).
|
||||||
|
* Add legacy option for `run_as` ([#1384](https://github.com/databricks/cli/pull/1384)).
|
||||||
|
|
||||||
|
API Changes:
|
||||||
|
* Changed `databricks lakehouse-monitors cancel-refresh` command with new required argument order.
|
||||||
|
* Changed `databricks lakehouse-monitors create` command with new required argument order.
|
||||||
|
* Changed `databricks lakehouse-monitors delete` command with new required argument order.
|
||||||
|
* Changed `databricks lakehouse-monitors get` command with new required argument order.
|
||||||
|
* Changed `databricks lakehouse-monitors get-refresh` command with new required argument order.
|
||||||
|
* Changed `databricks lakehouse-monitors list-refreshes` command with new required argument order.
|
||||||
|
* Changed `databricks lakehouse-monitors run-refresh` command with new required argument order.
|
||||||
|
* Changed `databricks lakehouse-monitors update` command with new required argument order.
|
||||||
|
* Changed `databricks account workspace-assignment update` command to return response.
|
||||||
|
|
||||||
|
OpenAPI commit 94684175b8bd65f8701f89729351f8069e8309c9 (2024-04-11)
|
||||||
|
|
||||||
|
Dependency updates:
|
||||||
|
* Bump github.com/databricks/databricks-sdk-go from 0.37.0 to 0.38.0 ([#1361](https://github.com/databricks/cli/pull/1361)).
|
||||||
|
* Bump golang.org/x/net from 0.22.0 to 0.23.0 ([#1380](https://github.com/databricks/cli/pull/1380)).
|
||||||
|
|
||||||
|
## 0.217.1
|
||||||
|
|
||||||
|
CLI:
|
||||||
|
* Don't attempt auth in `auth profiles --skip-validate` ([#1282](https://github.com/databricks/cli/pull/1282)).
|
||||||
|
* Fixed typo in error template for auth describe ([#1341](https://github.com/databricks/cli/pull/1341)).
|
||||||
|
|
||||||
|
Bundles:
|
* Correctly transform libraries in for_each_task block ([#1340](https://github.com/databricks/cli/pull/1340)).
* Do not emit warning on YAML anchor blocks ([#1354](https://github.com/databricks/cli/pull/1354)).
* Fixed pre-init script order ([#1348](https://github.com/databricks/cli/pull/1348)).
* Execute preinit after entry point to make sure scripts are loaded ([#1351](https://github.com/databricks/cli/pull/1351)).

Dependency updates:
* Bump internal terraform provider version to `1.39` ([#1339](https://github.com/databricks/cli/pull/1339)).
* Bump golang.org/x/term from 0.18.0 to 0.19.0 ([#1343](https://github.com/databricks/cli/pull/1343)).
* Bump github.com/hashicorp/hc-install from 0.6.3 to 0.6.4 ([#1344](https://github.com/databricks/cli/pull/1344)).
* Bump golang.org/x/mod from 0.16.0 to 0.17.0 ([#1345](https://github.com/databricks/cli/pull/1345)).
* Bump golang.org/x/oauth2 from 0.18.0 to 0.19.0 ([#1347](https://github.com/databricks/cli/pull/1347)).
* Bump golang.org/x/sync from 0.6.0 to 0.7.0 ([#1346](https://github.com/databricks/cli/pull/1346)).

## 0.217.0

Breaking Changes:
* Add allow list for resources when bundle `run_as` is set ([#1233](https://github.com/databricks/cli/pull/1233)).
* Make bundle validation print text output by default ([#1335](https://github.com/databricks/cli/pull/1335)).

CLI:
* Added `auth describe` command ([#1244](https://github.com/databricks/cli/pull/1244)).
* Fixed message for successful auth describe run ([#1336](https://github.com/databricks/cli/pull/1336)).

Bundles:
* Use UserName field to identify if service principal is used ([#1310](https://github.com/databricks/cli/pull/1310)).
* Allow unknown properties in the config file for template initialization ([#1315](https://github.com/databricks/cli/pull/1315)).
* Remove support for `DATABRICKS_BUNDLE_INCLUDES` ([#1317](https://github.com/databricks/cli/pull/1317)).
* Make `bundle.deployment` optional in the bundle schema ([#1321](https://github.com/databricks/cli/pull/1321)).
* Fix the generated DABs JSON schema ([#1322](https://github.com/databricks/cli/pull/1322)).
* Make bundle loaders return diagnostics ([#1319](https://github.com/databricks/cli/pull/1319)).
* Add `bundle debug terraform` command ([#1294](https://github.com/databricks/cli/pull/1294)).
* Allow specifying CLI version constraints required to run the bundle ([#1320](https://github.com/databricks/cli/pull/1320)).

Internal:
* Retain location information of variable reference ([#1333](https://github.com/databricks/cli/pull/1333)).
* Define `dyn.Mapping` to represent maps ([#1301](https://github.com/databricks/cli/pull/1301)).
* Return `diag.Diagnostics` from mutators ([#1305](https://github.com/databricks/cli/pull/1305)).
* Fix flaky test in `libs/process` ([#1314](https://github.com/databricks/cli/pull/1314)).
* Move path field to bundle type ([#1316](https://github.com/databricks/cli/pull/1316)).
* Load bundle configuration from mutator ([#1318](https://github.com/databricks/cli/pull/1318)).
* Return diagnostics from `config.Load` ([#1324](https://github.com/databricks/cli/pull/1324)).
* Return warning for nil primitive types during normalization ([#1329](https://github.com/databricks/cli/pull/1329)).
* Include `dyn.Path` in normalization warnings and errors ([#1332](https://github.com/databricks/cli/pull/1332)).
* Make normalization return warnings instead of errors ([#1334](https://github.com/databricks/cli/pull/1334)).

API Changes:
* Added `databricks lakeview migrate` command.
* Added `databricks lakeview unpublish` command.
* Changed `databricks ip-access-lists get` command. New request type is .

OpenAPI commit e316cc3d78d087522a74650e26586088da9ac8cb (2024-04-03)

Dependency updates:
* Bump github.com/databricks/databricks-sdk-go from 0.36.0 to 0.37.0 ([#1326](https://github.com/databricks/cli/pull/1326)).

## 0.216.0

CLI:
* Propagate correct `User-Agent` for CLI during OAuth flow ([#1264](https://github.com/databricks/cli/pull/1264)).
* Add usage string when command fails with incorrect arguments ([#1276](https://github.com/databricks/cli/pull/1276)).

Bundles:
* Include `dyn.Path` as argument to the visit callback function ([#1260](https://github.com/databricks/cli/pull/1260)).
* Inline logic to set a value in `dyn.SetByPath` ([#1261](https://github.com/databricks/cli/pull/1261)).
* Add assertions for the `dyn.Path` argument to the visit callback ([#1265](https://github.com/databricks/cli/pull/1265)).
* Add `dyn.MapByPattern` to map a function to values with matching paths ([#1266](https://github.com/databricks/cli/pull/1266)); a short sketch follows this list.
* Filter current user from resource permissions ([#1262](https://github.com/databricks/cli/pull/1262)).
* Retain location annotation when expanding globs for pipeline libraries ([#1274](https://github.com/databricks/cli/pull/1274)).
* Added deployment state for bundles ([#1267](https://github.com/databricks/cli/pull/1267)).
* Do CheckRunningResource only after terraform.Write ([#1292](https://github.com/databricks/cli/pull/1292)).
* Rewrite relative paths using `dyn.Location` of the underlying value ([#1273](https://github.com/databricks/cli/pull/1273)).
* Push deployment state right after files upload ([#1293](https://github.com/databricks/cli/pull/1293)).
* Make `Append` function to `dyn.Path` return independent slice ([#1295](https://github.com/databricks/cli/pull/1295)).
* Move bundle tests into bundle/tests ([#1299](https://github.com/databricks/cli/pull/1299)).
* Upgrade Terraform provider to 1.38.0 ([#1308](https://github.com/databricks/cli/pull/1308)).
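
A minimal sketch of how `dyn.MapByPattern` is meant to be used, not taken from this commit: it assumes the `dyn` package API around this release (`dyn.V`, `dyn.NewPattern`, `dyn.Key`, `dyn.AnyKey`), and the exact value-construction helpers may differ.

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

func main() {
	// A small dynamic value with two entries under "jobs".
	v := dyn.V(map[string]dyn.Value{
		"jobs": dyn.V(map[string]dyn.Value{
			"a": dyn.V("one"),
			"b": dyn.V("two"),
		}),
	})

	// Visit every value whose path matches jobs.*; the callback receives
	// the concrete dyn.Path of each match and returns the value to keep.
	pattern := dyn.NewPattern(dyn.Key("jobs"), dyn.AnyKey())
	_, err := dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
		fmt.Printf("visited %s\n", p.String())
		return v, nil
	})
	if err != nil {
		panic(err)
	}
}
```
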
Internal:
* Add integration test for mlops-stacks initialization ([#1155](https://github.com/databricks/cli/pull/1155)).
* Update actions/setup-python to v5 ([#1290](https://github.com/databricks/cli/pull/1290)).
* Update codecov/codecov-action to v4 ([#1291](https://github.com/databricks/cli/pull/1291)).

API Changes:
* Changed `databricks catalogs list` command.
* Changed `databricks online-tables create` command.
* Changed `databricks lakeview publish` command.
* Added `databricks lakeview create` command.
* Added `databricks lakeview get` command.
* Added `databricks lakeview get-published` command.
* Added `databricks lakeview trash` command.
* Added `databricks lakeview update` command.
* Moved settings related commands to `databricks settings` and `databricks account settings`.

OpenAPI commit 93763b0d7ae908520c229c786fff28b8fd623261 (2024-03-20)

Dependency updates:
* Bump golang.org/x/oauth2 from 0.17.0 to 0.18.0 ([#1270](https://github.com/databricks/cli/pull/1270)).
* Bump golang.org/x/mod from 0.15.0 to 0.16.0 ([#1271](https://github.com/databricks/cli/pull/1271)).
* Update Go SDK to v0.35.0 ([#1300](https://github.com/databricks/cli/pull/1300)).
* Update Go SDK to v0.36.0 ([#1304](https://github.com/databricks/cli/pull/1304)).

## 0.215.0

CLI:
* The SDK update fixes `fs cp` calls timing out when copying large files.

Bundles:
* Fix summary command when internal Terraform config doesn't exist ([#1242](https://github.com/databricks/cli/pull/1242)).
* Configure cobra.NoArgs for bundle commands where applicable ([#1250](https://github.com/databricks/cli/pull/1250)).
* Fixed building Python artifacts on Windows with WSL ([#1249](https://github.com/databricks/cli/pull/1249)).
* Add `--validate-only` flag to run validate-only pipeline update ([#1251](https://github.com/databricks/cli/pull/1251)).
* Only transform wheel libraries when using trampoline ([#1248](https://github.com/databricks/cli/pull/1248)).
* Return `application_id` for service principal lookups ([#1245](https://github.com/databricks/cli/pull/1245)).
* Support relative paths in artifact files source section and always upload all artifact files ([#1247](https://github.com/databricks/cli/pull/1247)).
* Fix DBConnect support in VS Code ([#1253](https://github.com/databricks/cli/pull/1253)).

Internal:
* Added test to verify scripts.Execute mutator works correctly ([#1237](https://github.com/databricks/cli/pull/1237)).

API Changes:
* Added `databricks permission-migration` command group.
* Updated nesting of the `databricks settings` and `databricks account settings` commands.
* Changed `databricks vector-search-endpoints delete-endpoint` command with new required argument order.
* Changed `databricks vector-search-indexes create-index` command with new required argument order.
* Changed `databricks vector-search-indexes delete-data-vector-index` command with new required argument order.
* Changed `databricks vector-search-indexes upsert-data-vector-index` command with new required argument order.

OpenAPI commit d855b30f25a06fe84f25214efa20e7f1fffcdf9e (2024-03-04)

Dependency updates:
* Bump github.com/stretchr/testify from 1.8.4 to 1.9.0 ([#1252](https://github.com/databricks/cli/pull/1252)).
* Update Go SDK to v0.34.0 ([#1256](https://github.com/databricks/cli/pull/1256)).

## 0.214.1

CLI:
* Improved error message when no `.databrickscfg` is present ([#1223](https://github.com/databricks/cli/pull/1223)).
* Use Go SDK Iterators when listing resources with the CLI ([#1202](https://github.com/databricks/cli/pull/1202)).

Bundles:
* Only set ComputeID value when `--compute-id` flag provided ([#1229](https://github.com/databricks/cli/pull/1229)).
* Add correct tag value for models in dev mode ([#1230](https://github.com/databricks/cli/pull/1230)).
* Upgrade Terraform provider to 1.37.0 ([#1235](https://github.com/databricks/cli/pull/1235)).

Internal:
* Fix CLI nightlies on our UC workspaces ([#1225](https://github.com/databricks/cli/pull/1225)).
* Handle alias types for map keys in toTyped conversion ([#1232](https://github.com/databricks/cli/pull/1232)).

## 0.214.0

CLI:
* Add support for UC Volumes to the `databricks fs` commands ([#1209](https://github.com/databricks/cli/pull/1209)).

Bundles:
* Use dynamic configuration model in bundles ([#1098](https://github.com/databricks/cli/pull/1098)).
* Allow use of variable references in primitive non-string fields ([#1219](https://github.com/databricks/cli/pull/1219)).
* Add an experimental default-sql template ([#1051](https://github.com/databricks/cli/pull/1051)).
* Add an experimental dbt-sql template ([#1059](https://github.com/databricks/cli/pull/1059)).

Internal:
* Add fork-user to winget release workflow ([#1214](https://github.com/databricks/cli/pull/1214)).
* Use `any` as type for data sources and resources in `tf/schema` ([#1216](https://github.com/databricks/cli/pull/1216)).
* Avoid infinite recursion when normalizing a recursive type ([#1213](https://github.com/databricks/cli/pull/1213)).
* Fix issue where interpolating a new ref would rewrite unrelated fields ([#1217](https://github.com/databricks/cli/pull/1217)).
* Use `dyn.Value` as input to generating Terraform JSON ([#1218](https://github.com/databricks/cli/pull/1218)).

API Changes:
* Changed `databricks lakehouse-monitors update` command with new required argument order.
* Added `databricks online-tables` command group.

OpenAPI commit cdd76a98a4fca7008572b3a94427566dd286c63b (2024-02-19)

Dependency updates:
* Bump Terraform provider to v1.36.2 ([#1215](https://github.com/databricks/cli/pull/1215)).
* Bump github.com/databricks/databricks-sdk-go from 0.32.0 to 0.33.0 ([#1222](https://github.com/databricks/cli/pull/1222)).

## 0.213.0

CLI:
* Ignore environment variables for `auth profiles` ([#1189](https://github.com/databricks/cli/pull/1189)).
* Update LICENSE file to match Databricks license language ([#1013](https://github.com/databricks/cli/pull/1013)).

Bundles:
* Added `bundle deployment bind` and `unbind` command ([#1131](https://github.com/databricks/cli/pull/1131)).
* Use allowlist for Git-related fields to include in metadata ([#1187](https://github.com/databricks/cli/pull/1187)).
* Added `--restart` flag for `bundle run` command ([#1191](https://github.com/databricks/cli/pull/1191)).
* Generate correct YAML if `custom_tags` or `spark_conf` is used for pipeline or job cluster configuration ([#1210](https://github.com/databricks/cli/pull/1210)).

Internal:
* Move folders package into libs ([#1184](https://github.com/databricks/cli/pull/1184)).
* Log time it takes for profile to load ([#1186](https://github.com/databricks/cli/pull/1186)).
* Use mockery to generate mocks compatible with testify/mock ([#1190](https://github.com/databricks/cli/pull/1190)).
* Retain partially valid structs in `convert.Normalize` ([#1203](https://github.com/databricks/cli/pull/1203)).
* Skip `for_each_task` when generating the bundle schema ([#1204](https://github.com/databricks/cli/pull/1204)).
* Regenerate the CLI using the same OpenAPI spec as the SDK ([#1205](https://github.com/databricks/cli/pull/1205)).
* Avoid race-conditions while executing sub-commands ([#1201](https://github.com/databricks/cli/pull/1201)).

API Changes:
* Added `databricks tables exists` command.
* Added `databricks lakehouse-monitors` command group.
* Removed `databricks files get-status` command.
* Added `databricks files create-directory` command.
* Added `databricks files delete-directory` command.
* Added `databricks files get-directory-metadata` command.
* Added `databricks files get-metadata` command.
* Added `databricks files list-directory-contents` command.
* Removed `databricks pipelines reset` command.
* Changed `databricks account settings delete-personal-compute-setting` command with new required argument order.
* Removed `databricks account settings read-personal-compute-setting` command.
* Changed `databricks account settings update-personal-compute-setting` command with new required argument order.
* Added `databricks account settings get-personal-compute-setting` command.
* Removed `databricks settings delete-default-workspace-namespace` command.
* Removed `databricks settings read-default-workspace-namespace` command.
* Removed `databricks settings update-default-workspace-namespace` command.
* Added `databricks settings delete-default-namespace-setting` command.
* Added `databricks settings delete-restrict-workspace-admins-setting` command.
* Added `databricks settings get-default-namespace-setting` command.
* Added `databricks settings get-restrict-workspace-admins-setting` command.
* Added `databricks settings update-default-namespace-setting` command.
* Added `databricks settings update-restrict-workspace-admins-setting` command.
* Changed `databricks token-management create-obo-token` command with new required argument order.
* Changed `databricks token-management get` command to return .
* Changed `databricks dashboards create` command. New request type is .
* Added `databricks dashboards update` command.

OpenAPI commit c40670f5a2055c92cf0a6aac92a5bccebfb80866 (2024-02-14)

Dependency updates:
* Bump github.com/hashicorp/hc-install from 0.6.2 to 0.6.3 ([#1200](https://github.com/databricks/cli/pull/1200)).
* Bump golang.org/x/term from 0.16.0 to 0.17.0 ([#1197](https://github.com/databricks/cli/pull/1197)).
* Bump golang.org/x/oauth2 from 0.16.0 to 0.17.0 ([#1198](https://github.com/databricks/cli/pull/1198)).
* Bump github.com/databricks/databricks-sdk-go from 0.30.1 to 0.32.0 ([#1199](https://github.com/databricks/cli/pull/1199)).

## 0.212.4

Bundles:
* Allow specifying executable in artifact section and skip bash from WSL ([#1169](https://github.com/databricks/cli/pull/1169)).
* Added warning when trying to deploy bundle with `--fail-on-active-runs` and running resources ([#1163](https://github.com/databricks/cli/pull/1163)).
* Group bundle run flags by job and pipeline types ([#1174](https://github.com/databricks/cli/pull/1174)).
* Make sure grouped flags are added to the command flag set ([#1180](https://github.com/databricks/cli/pull/1180)).
* Add short_name helper function to bundle init templates ([#1167](https://github.com/databricks/cli/pull/1167)).

Internal:
* Fix dynamic representation of zero values in maps and slices ([#1154](https://github.com/databricks/cli/pull/1154)).
* Refactor library to artifact matching to not use pointers ([#1172](https://github.com/databricks/cli/pull/1172)).
* Harden `dyn.Value` equality check ([#1173](https://github.com/databricks/cli/pull/1173)).
* Ensure every variable reference is passed to lookup function ([#1176](https://github.com/databricks/cli/pull/1176)).
* Empty struct should yield empty map in `convert.FromTyped` ([#1177](https://github.com/databricks/cli/pull/1177)).
* Zero destination struct in `convert.ToTyped` ([#1178](https://github.com/databricks/cli/pull/1178)).
* Fix integration test with invalid configuration ([#1182](https://github.com/databricks/cli/pull/1182)).
* Use `acc.WorkspaceTest` helper from bundle integration tests ([#1181](https://github.com/databricks/cli/pull/1181)).

## 0.212.3

CLI:
@@ -0,0 +1,25 @@
FROM alpine:3.19 as builder

RUN ["apk", "add", "jq"]
RUN ["apk", "add", "bash"]

WORKDIR /build

COPY ./docker/setup.sh /build/docker/setup.sh
COPY ./databricks /app/databricks
COPY ./docker/config.tfrc /app/config/config.tfrc

ARG ARCH
RUN /build/docker/setup.sh

# Start from a fresh base image, to remove any build artifacts and scripts.
FROM alpine:3.19

ENV DATABRICKS_TF_EXEC_PATH "/app/bin/terraform"
ENV DATABRICKS_TF_CLI_CONFIG_FILE "/app/config/config.tfrc"
ENV PATH="/app:${PATH}"

COPY --from=builder /app /app

ENTRYPOINT ["/app/databricks"]
CMD ["-h"]
LICENSE
@@ -1,25 +1,69 @@
-DB license
+Databricks License
 Copyright (2022) Databricks, Inc.

 Definitions.

-Agreement: The agreement between Databricks, Inc., and you governing
-the use of the Databricks Services, as that term is defined in
-the Master Cloud Services Agreement (MCSA) located at
-www.databricks.com/legal/mcsa.
+Agreement: The agreement between Databricks, Inc., and you governing the use of the Databricks Services, which shall be, with respect to Databricks, the Databricks Terms of Service located at www.databricks.com/termsofservice, and with respect to Databricks Community Edition, the Community Edition Terms of Service located at www.databricks.com/ce-termsofuse, in each case unless you have entered into a separate written agreement with Databricks governing the use of the applicable Databricks Services.

-Licensed Materials: The source code, object code, data, and/or other
-works to which this license applies.
+Software: The source code and object code to which this license applies.

-Scope of Use. You may not use the Licensed Materials except in
-connection with your use of the Databricks Services pursuant to
-the Agreement. Your use of the Licensed Materials must comply at all
-times with any restrictions applicable to the Databricks Services,
-generally, and must be used in accordance with any applicable
-documentation. You may view, use, copy, modify, publish, and/or
-distribute the Licensed Materials solely for the purposes of using
-the Licensed Materials within or connecting to the Databricks Services.
-If you do not agree to these terms, you may not view, use, copy,
-modify, publish, and/or distribute the Licensed Materials.
+Scope of Use. You may not use this Software except in connection with your use of the Databricks Services pursuant to the Agreement. Your use of the Software must comply at all times with any restrictions applicable to the Databricks Services, generally, and must be used in accordance with any applicable documentation. You may view, use, copy, modify, publish, and/or distribute the Software solely for the purposes of using the code within or connecting to the Databricks Services. If you do not agree to these terms, you may not view, use, copy, modify, publish, and/or distribute the Software.

-Redistribution. You may redistribute and sublicense the Licensed
-Materials so long as all use is in compliance with these terms.
-In addition:
-- You must give any other recipients a copy of this License;
-- You must cause any modified files to carry prominent notices
-  stating that you changed the files;
-- You must retain, in any derivative works that you distribute,
-  all copyright, patent, trademark, and attribution notices,
-  excluding those notices that do not pertain to any part of
-  the derivative works; and
-- If a "NOTICE" text file is provided as part of its
-  distribution, then any derivative works that you distribute
-  must include a readable copy of the attribution notices
-  contained within such NOTICE file, excluding those notices
-  that do not pertain to any part of the derivative works.
+Redistribution. You may redistribute and sublicense the Software so long as all use is in compliance with these terms. In addition:

+You must give any other recipients a copy of this License;
+You must cause any modified files to carry prominent notices stating that you changed the files;
+You must retain, in the source code form of any derivative works that you distribute, all copyright, patent, trademark, and attribution notices from the source code form, excluding those notices that do not pertain to any part of the derivative works; and
+If the source code form includes a "NOTICE" text file as part of its distribution, then any derivative works that you distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the derivative works.

-You may add your own copyright statement to your modifications and may
-provide additional license terms and conditions for use, reproduction,
-or distribution of your modifications, or for any such derivative works
-as a whole, provided your use, reproduction, and distribution of
-the Licensed Materials otherwise complies with the conditions stated
-in this License.
+You may add your own copyright statement to your modifications and may provide additional license terms and conditions for use, reproduction, or distribution of your modifications, or for any such derivative works as a whole, provided your use, reproduction, and distribution of the Software otherwise complies with the conditions stated in this License.

-Termination. This license terminates automatically upon your breach of
-these terms or upon the termination of your Agreement. Additionally,
-Databricks may terminate this license at any time on notice. Upon
-termination, you must permanently delete the Licensed Materials and
-all copies thereof.
+Termination. This license terminates automatically upon your breach of these terms or upon the termination of your Agreement. Additionally, Databricks may terminate this license at any time on notice. Upon termination, you must permanently delete the Software and all copies thereof.

 DISCLAIMER; LIMITATION OF LIABILITY.

-THE LICENSED MATERIALS ARE PROVIDED “AS-IS” AND WITH ALL FAULTS.
-DATABRICKS, ON BEHALF OF ITSELF AND ITS LICENSORS, SPECIFICALLY
-DISCLAIMS ALL WARRANTIES RELATING TO THE LICENSED MATERIALS, EXPRESS
-AND IMPLIED, INCLUDING, WITHOUT LIMITATION, IMPLIED WARRANTIES,
-CONDITIONS AND OTHER TERMS OF MERCHANTABILITY, SATISFACTORY QUALITY OR
-FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. DATABRICKS AND
-ITS LICENSORS TOTAL AGGREGATE LIABILITY RELATING TO OR ARISING OUT OF
-YOUR USE OF OR DATABRICKS’ PROVISIONING OF THE LICENSED MATERIALS SHALL
-BE LIMITED TO ONE THOUSAND ($1,000) DOLLARS. IN NO EVENT SHALL
-THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
-OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-ARISING FROM, OUT OF OR IN CONNECTION WITH THE LICENSED MATERIALS OR
-THE USE OR OTHER DEALINGS IN THE LICENSED MATERIALS.
+THE SOFTWARE IS PROVIDED “AS-IS” AND WITH ALL FAULTS. DATABRICKS, ON BEHALF OF ITSELF AND ITS LICENSORS, SPECIFICALLY DISCLAIMS ALL WARRANTIES RELATING TO THE SOURCE CODE, EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, IMPLIED WARRANTIES, CONDITIONS AND OTHER TERMS OF MERCHANTABILITY, SATISFACTORY QUALITY OR FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. DATABRICKS AND ITS LICENSORS TOTAL AGGREGATE LIABILITY RELATING TO OR ARISING OUT OF YOUR USE OF OR DATABRICKS’ PROVISIONING OF THE SOURCE CODE SHALL BE LIMITED TO ONE THOUSAND ($1,000) DOLLARS. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
NOTICE
@@ -16,16 +16,12 @@ go-ini/ini - https://github.com/go-ini/ini
 Copyright ini authors
 License - https://github.com/go-ini/ini/blob/main/LICENSE

-uber-go/mock - https://go.uber.org/mock
-Copyright Google Inc.
-License - https://github.com/uber-go/mock/blob/main/LICENSE
-
 ---

 This software contains code from the following open source projects, licensed under the MPL 2.0 license:

 hashicorp/go-version - https://github.com/hashicorp/go-version
 Copyright 2014 HashiCorp, Inc.
 License - https://github.com/hashicorp/go-version/blob/main/LICENSE

 hashicorp/hc-install - https://github.com/hashicorp/hc-install
@@ -40,6 +36,10 @@ hashicorp/terraform-json - https://github.com/hashicorp/terraform-json
 Copyright 2019 HashiCorp, Inc.
 License - https://github.com/hashicorp/terraform-json/blob/main/LICENSE

+hashicorp/terraform - https://github.com/hashicorp/terraform
+Copyright 2014 HashiCorp, Inc.
+License - https://github.com/hashicorp/terraform/blob/v1.5.5/LICENSE
+
 ---

 This software contains code from the following open source projects, licensed under the BSD (2-clause) license:
@@ -61,11 +61,6 @@ google/uuid - https://github.com/google/uuid
 Copyright (c) 2009,2014 Google Inc. All rights reserved.
 License - https://github.com/google/uuid/blob/master/LICENSE

-imdario/mergo - https://github.com/imdario/mergo
-Copyright (c) 2013 Dario Castañé. All rights reserved.
-Copyright (c) 2012 The Go Authors. All rights reserved.
-License - https://github.com/imdario/mergo/blob/master/LICENSE
-
 manifoldco/promptui - https://github.com/manifoldco/promptui
 Copyright (c) 2017, Arigato Machine Inc. All rights reserved.
 License - https://github.com/manifoldco/promptui/blob/master/LICENSE.md
@@ -81,7 +76,11 @@ License - https://github.com/fatih/color/blob/main/LICENSE.md
 ghodss/yaml - https://github.com/ghodss/yaml
 Copyright (c) 2014 Sam Ghods
 License - https://github.com/ghodss/yaml/blob/master/LICENSE

+Masterminds/semver - https://github.com/Masterminds/semver
+Copyright (C) 2014-2019, Matt Butcher and Matt Farina
+License - https://github.com/Masterminds/semver/blob/master/LICENSE.txt
+
 mattn/go-isatty - https://github.com/mattn/go-isatty
 Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
 https://github.com/mattn/go-isatty/blob/master/LICENSE
README.md
@@ -4,16 +4,31 @@

 This project is in Public Preview.

-Documentation about the full REST API coverage is available in the [docs folder](docs/commands.md).
-
 Documentation is available at https://docs.databricks.com/dev-tools/cli/databricks-cli.html.

 ## Installation

 This CLI is packaged as a dependency-free binary executable and may be located in any directory.
 See https://github.com/databricks/cli/releases for releases and
-[the docs pages](https://docs.databricks.com/dev-tools/cli/databricks-cli.html) for
-installation instructions.
+the [Databricks documentation](https://docs.databricks.com/en/dev-tools/cli/install.html) for detailed information about installing the CLI.
+
+------
+### Homebrew
+
+We maintain a [Homebrew tap](https://github.com/databricks/homebrew-tap) for installing the Databricks CLI. You can find instructions for how to install, upgrade and downgrade the CLI using Homebrew [here](https://github.com/databricks/homebrew-tap/blob/main/README.md).
+
+------
+### Docker
+You can use the CLI via a Docker image by pulling the image from `ghcr.io`. You can find all available versions
+at: https://github.com/databricks/cli/pkgs/container/cli.
+```
+docker pull ghcr.io/databricks/cli:latest
+```
+
+Example of how to run the CLI using the Docker image. More documentation is available at https://docs.databricks.com/dev-tools/bundles/airgapped-environment.html.
+```
+docker run -e DATABRICKS_HOST=$YOUR_HOST_URL -e DATABRICKS_TOKEN=$YOUR_TOKEN ghcr.io/databricks/cli:latest current-user me
+```

 ## Authentication
@@ -7,6 +7,7 @@ import (
 	"slices"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"golang.org/x/exp/maps"
 )

@@ -21,7 +22,7 @@ func (m *all) Name() string {
 	return fmt.Sprintf("artifacts.%sAll", m.name)
 }

-func (m *all) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *all) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	var out []bundle.Mutator

 	// Iterate with stable ordering.
@@ -31,7 +32,7 @@ func (m *all) Apply(ctx context.Context, b *bundle.Bundle) error {
 	for _, name := range keys {
 		m, err := m.fn(name)
 		if err != nil {
-			return err
+			return diag.FromErr(err)
 		}
 		if m != nil {
 			out = append(out, m)
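
The pattern in this hunk repeats across the files below: mutator `Apply` methods now return `diag.Diagnostics` instead of `error`, wrapping plain errors with `diag.FromErr` or formatting them with `diag.Errorf`. A minimal sketch of the new contract; the `exampleMutator` type and `doWork` helper are hypothetical, while `bundle` and `diag` are the real packages used in the hunks:

```go
package artifacts

import (
	"context"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
)

// doWork stands in for whatever the mutator actually does (hypothetical).
func doWork(ctx context.Context, b *bundle.Bundle) error { return nil }

type exampleMutator struct{}

func (m *exampleMutator) Name() string { return "example" }

// Apply follows the post-#1305 contract: return diag.Diagnostics rather than
// error, wrapping plain errors with diag.FromErr instead of returning them.
func (m *exampleMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	if err := doWork(ctx, b); err != nil {
		return diag.FromErr(err)
	}
	// A nil Diagnostics means "no errors, no warnings".
	return nil
}
```
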
@@ -8,13 +8,17 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"strings"

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/artifacts/whl"
 	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/filer"
 	"github.com/databricks/cli/libs/log"
+	"github.com/databricks/databricks-sdk-go"
 )

 type mutatorFactory = func(name string) bundle.Mutator
@@ -56,17 +60,17 @@ func (m *basicBuild) Name() string {
 	return fmt.Sprintf("artifacts.Build(%s)", m.name)
 }

-func (m *basicBuild) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *basicBuild) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	artifact, ok := b.Config.Artifacts[m.name]
 	if !ok {
-		return fmt.Errorf("artifact doesn't exist: %s", m.name)
+		return diag.Errorf("artifact doesn't exist: %s", m.name)
 	}

 	cmdio.LogString(ctx, fmt.Sprintf("Building %s...", m.name))

 	out, err := artifact.Build(ctx)
 	if err != nil {
-		return fmt.Errorf("build for %s failed, error: %w, output: %s", m.name, err, out)
+		return diag.Errorf("build for %s failed, error: %v, output: %s", m.name, err, out)
 	}
 	log.Infof(ctx, "Build succeeded")
@@ -86,54 +90,139 @@ func (m *basicUpload) Name() string {
 	return fmt.Sprintf("artifacts.Upload(%s)", m.name)
 }

-func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	artifact, ok := b.Config.Artifacts[m.name]
 	if !ok {
-		return fmt.Errorf("artifact doesn't exist: %s", m.name)
+		return diag.Errorf("artifact doesn't exist: %s", m.name)
 	}

 	if len(artifact.Files) == 0 {
-		return fmt.Errorf("artifact source is not configured: %s", m.name)
+		return diag.Errorf("artifact source is not configured: %s", m.name)
 	}

 	uploadPath, err := getUploadBasePath(b)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

-	client, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), uploadPath)
+	client, err := getFilerForArtifacts(b.WorkspaceClient(), uploadPath)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

-	err = uploadArtifact(ctx, artifact, uploadPath, client)
+	err = uploadArtifact(ctx, b, artifact, uploadPath, client)
 	if err != nil {
-		return fmt.Errorf("upload for %s failed, error: %w", m.name, err)
+		return diag.Errorf("upload for %s failed, error: %v", m.name, err)
 	}

 	return nil
 }

+func getFilerForArtifacts(w *databricks.WorkspaceClient, uploadPath string) (filer.Filer, error) {
+	if isVolumesPath(uploadPath) {
+		return filer.NewFilesClient(w, uploadPath)
+	}
+	return filer.NewWorkspaceFilesClient(w, uploadPath)
+}
+
+func isVolumesPath(path string) bool {
+	return strings.HasPrefix(path, "/Volumes/")
+}
+
-func uploadArtifact(ctx context.Context, a *config.Artifact, uploadPath string, client filer.Filer) error {
+func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, uploadPath string, client filer.Filer) error {
 	for i := range a.Files {
 		f := &a.Files[i]
-		if f.NeedsUpload() {
-			filename := filepath.Base(f.Source)
-			cmdio.LogString(ctx, fmt.Sprintf("Uploading %s...", filename))
-
-			err := uploadArtifactFile(ctx, f.Source, client)
-			if err != nil {
-				return err
-			}
-			log.Infof(ctx, "Upload succeeded")
-			f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source))
-		}
+
+		filename := filepath.Base(f.Source)
+		cmdio.LogString(ctx, fmt.Sprintf("Uploading %s...", filename))
+
+		err := uploadArtifactFile(ctx, f.Source, client)
+		if err != nil {
+			return err
+		}
+
+		log.Infof(ctx, "Upload succeeded")
+		f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source))
+		remotePath := f.RemotePath
+
+		if !strings.HasPrefix(f.RemotePath, "/Workspace/") && !strings.HasPrefix(f.RemotePath, "/Volumes/") {
+			wsfsBase := "/Workspace"
+			remotePath = path.Join(wsfsBase, f.RemotePath)
+		}
+
+		for _, job := range b.Config.Resources.Jobs {
+			rewriteArtifactPath(b, f, job, remotePath)
+		}
 	}

-	a.NormalisePaths()
 	return nil
 }

+func rewriteArtifactPath(b *bundle.Bundle, f *config.ArtifactFile, job *resources.Job, remotePath string) {
+	// Rewrite artifact path in job task libraries
+	for i := range job.Tasks {
+		task := &job.Tasks[i]
+		for j := range task.Libraries {
+			lib := &task.Libraries[j]
+			if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) {
+				lib.Whl = remotePath
+			}
+			if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) {
+				lib.Jar = remotePath
+			}
+		}
+
+		// Rewrite artifact path in job task libraries for ForEachTask
+		if task.ForEachTask != nil {
+			forEachTask := task.ForEachTask
+			for j := range forEachTask.Task.Libraries {
+				lib := &forEachTask.Task.Libraries[j]
+				if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) {
+					lib.Whl = remotePath
+				}
+				if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) {
+					lib.Jar = remotePath
+				}
+			}
+		}
+	}
+
+	// Rewrite artifact path in job environments
+	for i := range job.Environments {
+		env := &job.Environments[i]
+		if env.Spec == nil {
+			continue
+		}
+
+		for j := range env.Spec.Dependencies {
+			lib := env.Spec.Dependencies[j]
+			if isArtifactMatchLibrary(f, lib, b) {
+				env.Spec.Dependencies[j] = remotePath
+			}
+		}
+	}
+}
+
+func isArtifactMatchLibrary(f *config.ArtifactFile, libPath string, b *bundle.Bundle) bool {
+	if !filepath.IsAbs(libPath) {
+		libPath = filepath.Join(b.RootPath, libPath)
+	}
+
+	// libPath can be a glob pattern, so do the match first
+	matches, err := filepath.Glob(libPath)
+	if err != nil {
+		return false
+	}
+
+	for _, m := range matches {
+		if m == f.Source {
+			return true
+		}
+	}
+
+	return false
+}
+
 // Function to upload artifact file to Workspace
 func uploadArtifactFile(ctx context.Context, file string, client filer.Filer) error {
 	raw, err := os.ReadFile(file)
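
One detail of `uploadArtifact` above that is easy to miss: library references are rewritten to the workspace file system form of the remote path. A hypothetical standalone helper that captures just that prefixing rule (the name is illustrative, not from the commit):

```go
package artifacts

import (
	"path"
	"strings"
)

// remoteLibraryPath mirrors the prefixing rule in uploadArtifact: references
// that are not already under /Workspace/ or /Volumes/ are addressed through
// the workspace file system, so they gain a "/Workspace" prefix; UC Volumes
// paths are left untouched.
func remoteLibraryPath(p string) string {
	if strings.HasPrefix(p, "/Workspace/") || strings.HasPrefix(p, "/Volumes/") {
		return p
	}
	return path.Join("/Workspace", p)
}
```

So an upload base path of `/foo/bar/artifacts` yields library references like `/Workspace/foo/bar/artifacts/source.whl`, which is exactly what the tests in the next file assert.
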
@@ -0,0 +1,196 @@
package artifacts

import (
	"context"
	"path/filepath"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/resources"
	mockfiler "github.com/databricks/cli/internal/mocks/libs/filer"
	"github.com/databricks/cli/internal/testutil"
	"github.com/databricks/cli/libs/filer"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestArtifactUploadForWorkspace(t *testing.T) {
	tmpDir := t.TempDir()
	whlFolder := filepath.Join(tmpDir, "whl")
	testutil.Touch(t, whlFolder, "source.whl")
	whlLocalPath := filepath.Join(whlFolder, "source.whl")

	b := &bundle.Bundle{
		RootPath: tmpDir,
		Config: config.Root{
			Workspace: config.Workspace{
				ArtifactPath: "/foo/bar/artifacts",
			},
			Artifacts: config.Artifacts{
				"whl": {
					Type: config.ArtifactPythonWheel,
					Files: []config.ArtifactFile{
						{Source: whlLocalPath},
					},
				},
			},
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job": {
						JobSettings: &jobs.JobSettings{
							Tasks: []jobs.Task{
								{
									Libraries: []compute.Library{
										{
											Whl: filepath.Join("whl", "*.whl"),
										},
										{
											Whl: "/Workspace/Users/foo@bar.com/mywheel.whl",
										},
									},
								},
								{
									ForEachTask: &jobs.ForEachTask{
										Task: jobs.Task{
											Libraries: []compute.Library{
												{
													Whl: filepath.Join("whl", "*.whl"),
												},
												{
													Whl: "/Workspace/Users/foo@bar.com/mywheel.whl",
												},
											},
										},
									},
								},
							},
							Environments: []jobs.JobEnvironment{
								{
									Spec: &compute.Environment{
										Dependencies: []string{
											filepath.Join("whl", "source.whl"),
											"/Workspace/Users/foo@bar.com/mywheel.whl",
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}

	artifact := b.Config.Artifacts["whl"]
	mockFiler := mockfiler.NewMockFiler(t)
	mockFiler.EXPECT().Write(
		mock.Anything,
		filepath.Join("source.whl"),
		mock.AnythingOfType("*bytes.Reader"),
		filer.OverwriteIfExists,
		filer.CreateParentDirectories,
	).Return(nil)

	err := uploadArtifact(context.Background(), b, artifact, "/foo/bar/artifacts", mockFiler)
	require.NoError(t, err)

	// Test that libraries path is updated
	require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[0].Whl)
	require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl)
	require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0])
	require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1])
	require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl)
	require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl)
}

func TestArtifactUploadForVolumes(t *testing.T) {
	tmpDir := t.TempDir()
	whlFolder := filepath.Join(tmpDir, "whl")
	testutil.Touch(t, whlFolder, "source.whl")
	whlLocalPath := filepath.Join(whlFolder, "source.whl")

	b := &bundle.Bundle{
		RootPath: tmpDir,
		Config: config.Root{
			Workspace: config.Workspace{
				ArtifactPath: "/Volumes/foo/bar/artifacts",
			},
			Artifacts: config.Artifacts{
				"whl": {
					Type: config.ArtifactPythonWheel,
					Files: []config.ArtifactFile{
						{Source: whlLocalPath},
					},
				},
			},
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job": {
						JobSettings: &jobs.JobSettings{
							Tasks: []jobs.Task{
								{
									Libraries: []compute.Library{
										{
											Whl: filepath.Join("whl", "*.whl"),
										},
										{
											Whl: "/Volumes/some/path/mywheel.whl",
										},
									},
								},
								{
									ForEachTask: &jobs.ForEachTask{
										Task: jobs.Task{
											Libraries: []compute.Library{
												{
													Whl: filepath.Join("whl", "*.whl"),
												},
												{
													Whl: "/Volumes/some/path/mywheel.whl",
												},
											},
										},
									},
								},
							},
							Environments: []jobs.JobEnvironment{
								{
									Spec: &compute.Environment{
										Dependencies: []string{
											filepath.Join("whl", "source.whl"),
											"/Volumes/some/path/mywheel.whl",
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}

	artifact := b.Config.Artifacts["whl"]
	mockFiler := mockfiler.NewMockFiler(t)
	mockFiler.EXPECT().Write(
		mock.Anything,
		filepath.Join("source.whl"),
		mock.AnythingOfType("*bytes.Reader"),
		filer.OverwriteIfExists,
		filer.CreateParentDirectories,
	).Return(nil)

	err := uploadArtifact(context.Background(), b, artifact, "/Volumes/foo/bar/artifacts", mockFiler)
	require.NoError(t, err)

	// Test that libraries path is updated
	require.Equal(t, "/Volumes/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[0].Whl)
	require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl)
	require.Equal(t, "/Volumes/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0])
	require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1])
	require.Equal(t, "/Volumes/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl)
	require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl)
}
@@ -5,6 +5,7 @@ import (

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/artifacts/whl"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
 )

@@ -19,7 +20,7 @@ func (m *autodetect) Name() string {
 	return "artifacts.DetectPackages"
 }

-func (m *autodetect) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *autodetect) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	// If artifacts section explicitly defined, do not try to auto detect packages
 	if b.Config.Artifacts != nil {
 		log.Debugf(ctx, "artifacts block is defined, skipping auto-detecting")
@@ -6,6 +6,8 @@ import (
 	"path/filepath"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/libs/diag"
 )

 func BuildAll() bundle.Mutator {
@@ -27,10 +29,19 @@ func (m *build) Name() string {
 	return fmt.Sprintf("artifacts.Build(%s)", m.name)
 }

-func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *build) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	artifact, ok := b.Config.Artifacts[m.name]
 	if !ok {
-		return fmt.Errorf("artifact doesn't exist: %s", m.name)
+		return diag.Errorf("artifact doesn't exist: %s", m.name)
+	}
+
+	// Check if source paths are absolute, if not, make them absolute
+	for k := range artifact.Files {
+		f := &artifact.Files[k]
+		if !filepath.IsAbs(f.Source) {
+			dirPath := filepath.Dir(artifact.ConfigFilePath)
+			f.Source = filepath.Join(dirPath, f.Source)
+		}
 	}

 	// Skip building if build command is not specified or infered
@@ -38,19 +49,59 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error {
 		// If no build command was specified or infered and there is no
 		// artifact output files specified, artifact is misconfigured
 		if len(artifact.Files) == 0 {
-			return fmt.Errorf("misconfigured artifact: please specify 'build' or 'files' property")
+			return diag.Errorf("misconfigured artifact: please specify 'build' or 'files' property")
 		}
-		return nil
+
+		// We can skip calling build mutator if there is no build command
+		// But we still need to expand glob references in files source path.
+		diags := expandGlobReference(artifact)
+		return diags
 	}

 	// If artifact path is not provided, use bundle root dir
 	if artifact.Path == "" {
-		artifact.Path = b.Config.Path
+		artifact.Path = b.RootPath
 	}

 	if !filepath.IsAbs(artifact.Path) {
-		artifact.Path = filepath.Join(b.Config.Path, artifact.Path)
+		dirPath := filepath.Dir(artifact.ConfigFilePath)
+		artifact.Path = filepath.Join(dirPath, artifact.Path)
 	}

-	return bundle.Apply(ctx, b, getBuildMutator(artifact.Type, m.name))
+	diags := bundle.Apply(ctx, b, getBuildMutator(artifact.Type, m.name))
+	if diags.HasError() {
+		return diags
+	}
+
+	// We need to expand glob reference after build mutator is applied because
+	// if we do it before, any files that are generated by build command will
+	// not be included into artifact.Files and thus will not be uploaded.
+	d := expandGlobReference(artifact)
+	return diags.Extend(d)
+}
+
+func expandGlobReference(artifact *config.Artifact) diag.Diagnostics {
+	var diags diag.Diagnostics
+
+	// Expand any glob reference in files source path
+	files := make([]config.ArtifactFile, 0, len(artifact.Files))
+	for _, f := range artifact.Files {
+		matches, err := filepath.Glob(f.Source)
+		if err != nil {
+			return diags.Extend(diag.Errorf("unable to find files for %s: %v", f.Source, err))
+		}
+
+		if len(matches) == 0 {
+			return diags.Extend(diag.Errorf("no files found for %s", f.Source))
+		}
+
+		for _, match := range matches {
+			files = append(files, config.ArtifactFile{
+				Source: match,
+			})
+		}
+	}
+
+	artifact.Files = files
+	return diags
 }
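
One path rule from this hunk is worth isolating: relative `files` sources (and relative artifact paths) are now resolved against the directory of the config file that declared the artifact, not the bundle root or the working directory. A hypothetical helper that captures the rule (the name is illustrative, not from the commit):

```go
package artifacts

import "path/filepath"

// resolveSource mirrors the resolution added in build.Apply: a relative
// artifact source is joined onto the directory of the config file that
// declared it (artifact.ConfigFilePath in the hunk above), not onto the
// process working directory.
func resolveSource(configFilePath, source string) string {
	if filepath.IsAbs(source) {
		return source
	}
	return filepath.Join(filepath.Dir(configFilePath), source)
}
```

With a config file at `<root>/resources/artifacts.yml` and a source of `../test/*.jar`, this yields `<root>/test/*.jar`, matching the expectation in `TestExpandGlobFilesSource` in the last file of this diff.
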
@@ -7,6 +7,7 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/artifacts/whl"
 	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/libs/diag"
 )

 var inferMutators map[config.ArtifactType]mutatorFactory = map[config.ArtifactType]mutatorFactory{
@@ -41,10 +42,10 @@ func (m *infer) Name() string {
 	return fmt.Sprintf("artifacts.Infer(%s)", m.name)
 }

-func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	artifact, ok := b.Config.Artifacts[m.name]
 	if !ok {
-		return fmt.Errorf("artifact doesn't exist: %s", m.name)
+		return diag.Errorf("artifact doesn't exist: %s", m.name)
 	}

 	// only try to infer command if it's not already defined
@@ -5,7 +5,9 @@ import (
 	"fmt"
 
 	"github.com/databricks/cli/bundle"
-	"github.com/databricks/databricks-sdk-go/service/workspace"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/filer"
+	"github.com/databricks/cli/libs/log"
 )
 
 func UploadAll() bundle.Mutator {
@@ -31,14 +33,14 @@ func (m *upload) Name() string {
 	return fmt.Sprintf("artifacts.Upload(%s)", m.name)
 }
 
-func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	artifact, ok := b.Config.Artifacts[m.name]
 	if !ok {
-		return fmt.Errorf("artifact doesn't exist: %s", m.name)
+		return diag.Errorf("artifact doesn't exist: %s", m.name)
 	}
 
 	if len(artifact.Files) == 0 {
-		return fmt.Errorf("artifact source is not configured: %s", m.name)
+		return diag.Errorf("artifact source is not configured: %s", m.name)
 	}
 
 	return bundle.Apply(ctx, b, getUploadMutator(artifact.Type, m.name))
@@ -50,20 +52,26 @@ func (m *cleanUp) Name() string {
 	return "artifacts.CleanUp"
 }
 
-func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	uploadPath, err := getUploadBasePath(b)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}
 
-	b.WorkspaceClient().Workspace.Delete(ctx, workspace.Delete{
-		Path:      uploadPath,
-		Recursive: true,
-	})
-
-	err = b.WorkspaceClient().Workspace.MkdirsByPath(ctx, uploadPath)
+	client, err := getFilerForArtifacts(b.WorkspaceClient(), uploadPath)
 	if err != nil {
-		return fmt.Errorf("unable to create directory for %s: %w", uploadPath, err)
+		return diag.FromErr(err)
+	}
+
+	// We intentionally ignore the error because it is not critical to the deployment
+	err = client.Delete(ctx, ".", filer.DeleteRecursively)
+	if err != nil {
+		log.Errorf(ctx, "failed to delete %s: %v", uploadPath, err)
+	}
+
+	err = client.Mkdir(ctx, ".")
+	if err != nil {
+		return diag.Errorf("unable to create directory for %s: %v", uploadPath, err)
 	}
 
 	return nil
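The cleanUp rewrite above swaps direct workspace API calls for a filer rooted at the upload path, so "." refers to that directory and delete-then-mkdir resets it between deployments. A sketch of the same shape against the local filesystem (the Filer interface here is a hand-rolled stand-in, not the CLI's libs/filer):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

type Filer interface {
	Delete(name string, recursive bool) error
	Mkdir(name string) error
}

// localFiler implements Filer against the local filesystem for illustration;
// every name is resolved relative to the root the filer was created with.
type localFiler struct{ root string }

func (f localFiler) Delete(name string, recursive bool) error {
	p := filepath.Join(f.root, name)
	if recursive {
		return os.RemoveAll(p)
	}
	return os.Remove(p)
}

func (f localFiler) Mkdir(name string) error {
	return os.MkdirAll(filepath.Join(f.root, name), 0o755)
}

func cleanUp(f Filer) error {
	// Failing to delete is non-fatal, mirroring the log-and-continue above.
	if err := f.Delete(".", true); err != nil {
		fmt.Println("failed to delete:", err)
	}
	return f.Mkdir(".")
}

func main() {
	dir, _ := os.MkdirTemp("", "artifacts")
	if err := cleanUp(localFiler{root: dir}); err != nil {
		fmt.Println("cleanup failed:", err)
	}
}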
@@ -0,0 +1,109 @@
+package artifacts
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/internal/bundletest"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/testfile"
+	"github.com/stretchr/testify/require"
+)
+
+type noop struct{}
+
+func (n *noop) Apply(context.Context, *bundle.Bundle) diag.Diagnostics {
+	return nil
+}
+
+func (n *noop) Name() string {
+	return "noop"
+}
+
+func TestExpandGlobFilesSource(t *testing.T) {
+	rootPath := t.TempDir()
+	err := os.Mkdir(filepath.Join(rootPath, "test"), 0755)
+	require.NoError(t, err)
+
+	t1 := testfile.CreateFile(t, filepath.Join(rootPath, "test", "myjar1.jar"))
+	t1.Close(t)
+
+	t2 := testfile.CreateFile(t, filepath.Join(rootPath, "test", "myjar2.jar"))
+	t2.Close(t)
+
+	b := &bundle.Bundle{
+		RootPath: rootPath,
+		Config: config.Root{
+			Artifacts: map[string]*config.Artifact{
+				"test": {
+					Type: "custom",
+					Files: []config.ArtifactFile{
+						{
+							Source: filepath.Join("..", "test", "*.jar"),
+						},
+					},
+				},
+			},
+		},
+	}
+
+	bundletest.SetLocation(b, ".", filepath.Join(rootPath, "resources", "artifacts.yml"))
+
+	u := &upload{"test"}
+	uploadMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator {
+		return &noop{}
+	}
+
+	bm := &build{"test"}
+	buildMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator {
+		return &noop{}
+	}
+
+	diags := bundle.Apply(context.Background(), b, bundle.Seq(bm, u))
+	require.NoError(t, diags.Error())
+
+	require.Equal(t, 2, len(b.Config.Artifacts["test"].Files))
+	require.Equal(t, filepath.Join(rootPath, "test", "myjar1.jar"), b.Config.Artifacts["test"].Files[0].Source)
+	require.Equal(t, filepath.Join(rootPath, "test", "myjar2.jar"), b.Config.Artifacts["test"].Files[1].Source)
+}
+
+func TestExpandGlobFilesSourceWithNoMatches(t *testing.T) {
+	rootPath := t.TempDir()
+	err := os.Mkdir(filepath.Join(rootPath, "test"), 0755)
+	require.NoError(t, err)
+
+	b := &bundle.Bundle{
+		RootPath: rootPath,
+		Config: config.Root{
+			Artifacts: map[string]*config.Artifact{
+				"test": {
+					Type: "custom",
+					Files: []config.ArtifactFile{
+						{
+							Source: filepath.Join("..", "test", "myjar.jar"),
+						},
+					},
+				},
+			},
+		},
+	}
+
+	bundletest.SetLocation(b, ".", filepath.Join(rootPath, "resources", "artifacts.yml"))
+
+	u := &upload{"test"}
+	uploadMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator {
+		return &noop{}
+	}
+
+	bm := &build{"test"}
+	buildMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator {
+		return &noop{}
+	}
+
+	diags := bundle.Apply(context.Background(), b, bundle.Seq(bm, u))
+	require.ErrorContains(t, diags.Error(), "no files found for")
+}
@@ -11,6 +11,7 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/libraries"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
 )
 
@@ -25,7 +26,7 @@ func (m *detectPkg) Name() string {
 	return "artifacts.whl.AutoDetect"
 }
 
-func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	wheelTasks := libraries.FindAllWheelTasksWithLocalLibraries(b)
 	if len(wheelTasks) == 0 {
 		log.Infof(ctx, "No local wheel tasks in databricks.yml config, skipping auto detect")
@@ -34,23 +35,23 @@ func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) error {
 	log.Infof(ctx, "Detecting Python wheel project...")
 
 	// checking if there is setup.py in the bundle root
-	setupPy := filepath.Join(b.Config.Path, "setup.py")
+	setupPy := filepath.Join(b.RootPath, "setup.py")
 	_, err := os.Stat(setupPy)
 	if err != nil {
 		log.Infof(ctx, "No Python wheel project found at bundle root folder")
 		return nil
 	}
 
-	log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.Config.Path))
+	log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.RootPath))
 	module := extractModuleName(setupPy)
 
 	if b.Config.Artifacts == nil {
 		b.Config.Artifacts = make(map[string]*config.Artifact)
 	}
 
-	pkgPath, err := filepath.Abs(b.Config.Path)
+	pkgPath, err := filepath.Abs(b.RootPath)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}
 	b.Config.Artifacts[module] = &config.Artifact{
 		Path: pkgPath,
@@ -9,6 +9,7 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/cli/libs/python"
 )
@@ -27,10 +28,10 @@ func (m *build) Name() string {
 	return fmt.Sprintf("artifacts.whl.Build(%s)", m.name)
 }
 
-func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *build) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	artifact, ok := b.Config.Artifacts[m.name]
 	if !ok {
-		return fmt.Errorf("artifact doesn't exist: %s", m.name)
+		return diag.Errorf("artifact doesn't exist: %s", m.name)
 	}
 
 	cmdio.LogString(ctx, fmt.Sprintf("Building %s...", m.name))
@@ -43,13 +44,13 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error {
 
 	out, err := artifact.Build(ctx)
 	if err != nil {
-		return fmt.Errorf("build failed %s, error: %w, output: %s", m.name, err, out)
+		return diag.Errorf("build failed %s, error: %v, output: %s", m.name, err, out)
 	}
 	log.Infof(ctx, "Build succeeded")
 
 	wheels := python.FindFilesWithSuffixInPath(distPath, ".whl")
 	if len(wheels) == 0 {
-		return fmt.Errorf("cannot find built wheel in %s for package %s", dir, m.name)
+		return diag.Errorf("cannot find built wheel in %s for package %s", dir, m.name)
 	}
 	for _, wheel := range wheels {
 		artifact.Files = append(artifact.Files, config.ArtifactFile{
@@ -7,6 +7,7 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/libraries"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
 )
 
@@ -20,7 +21,7 @@ func (m *fromLibraries) Name() string {
 	return "artifacts.whl.DefineArtifactsFromLibraries"
 }
 
-func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	if len(b.Config.Artifacts) != 0 {
 		log.Debugf(ctx, "Skipping defining artifacts from libraries because artifacts section is explicitly defined")
 		return nil
@@ -29,24 +30,18 @@ func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) error {
 	tasks := libraries.FindAllWheelTasksWithLocalLibraries(b)
 	for _, task := range tasks {
 		for _, lib := range task.Libraries {
-			matches, err := filepath.Glob(filepath.Join(b.Config.Path, lib.Whl))
-			// File referenced from libraries section does not exists, skipping
-			if err != nil {
-				continue
-			}
+			matchAndAdd(ctx, lib.Whl, b)
+		}
+	}
 
-			for _, match := range matches {
-				name := filepath.Base(match)
-				if b.Config.Artifacts == nil {
-					b.Config.Artifacts = make(map[string]*config.Artifact)
-				}
-				log.Debugf(ctx, "Adding an artifact block for %s", match)
-				b.Config.Artifacts[name] = &config.Artifact{
-					Files: []config.ArtifactFile{
-						{Source: match},
-					},
-					Type: config.ArtifactPythonWheel,
-				}
+	envs := libraries.FindAllEnvironments(b)
+	for _, jobEnvs := range envs {
+		for _, env := range jobEnvs {
+			if env.Spec != nil {
+				for _, dep := range env.Spec.Dependencies {
+					if libraries.IsEnvironmentDependencyLocal(dep) {
+						matchAndAdd(ctx, dep, b)
+					}
+				}
 			}
 		}
 	}
@@ -54,3 +49,26 @@ func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) error {
 
 	return nil
 }
+
+func matchAndAdd(ctx context.Context, lib string, b *bundle.Bundle) {
+	matches, err := filepath.Glob(filepath.Join(b.RootPath, lib))
+	// File referenced from libraries section does not exists, skipping
+	if err != nil {
+		return
+	}
+
+	for _, match := range matches {
+		name := filepath.Base(match)
+		if b.Config.Artifacts == nil {
+			b.Config.Artifacts = make(map[string]*config.Artifact)
+		}
+
+		log.Debugf(ctx, "Adding an artifact block for %s", match)
+		b.Config.Artifacts[name] = &config.Artifact{
+			Files: []config.ArtifactFile{
+				{Source: match},
+			},
+			Type: config.ArtifactPythonWheel,
+		}
+	}
+}
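One detail worth noting about matchAndAdd: filepath.Glob only returns an error for a malformed pattern; a pattern that matches nothing yields an empty slice and a nil error. So the err != nil early return really does mean "bad pattern", and a missing file simply adds no artifacts. A quick demonstration:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// No matches: empty slice, nil error.
	matches, err := filepath.Glob("./does-not-exist/*.whl")
	fmt.Println(matches, err) // [] <nil>

	// Malformed pattern: the only case Glob reports an error for.
	_, err = filepath.Glob("[")
	fmt.Println(err) // syntax error in pattern
}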
@@ -5,6 +5,7 @@ import (
 	"fmt"
 
 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/python"
 )
 
@@ -12,11 +13,11 @@ type infer struct {
 	name string
 }
 
-func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	artifact := b.Config.Artifacts[m.name]
 	py, err := python.DetectExecutable(ctx)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}
 
 	// Note: using --build-number (build tag) flag does not help with re-installing
@@ -16,12 +16,13 @@ import (
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/env"
 	"github.com/databricks/cli/bundle/metadata"
-	"github.com/databricks/cli/folders"
+	"github.com/databricks/cli/libs/fileset"
 	"github.com/databricks/cli/libs/git"
 	"github.com/databricks/cli/libs/locker"
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/cli/libs/tags"
 	"github.com/databricks/cli/libs/terraform"
+	"github.com/databricks/cli/libs/vfs"
 	"github.com/databricks/databricks-sdk-go"
 	sdkconfig "github.com/databricks/databricks-sdk-go/config"
 	"github.com/hashicorp/terraform-exec/tfexec"
@@ -30,6 +31,14 @@ import (
 const internalFolder = ".internal"
 
 type Bundle struct {
+	// RootPath contains the directory path to the root of the bundle.
+	// It is set when we instantiate a new bundle instance.
+	RootPath string
+
+	// BundleRoot is a virtual filesystem path to the root of the bundle.
+	// Exclusively use this field for filesystem operations.
+	BundleRoot vfs.Path
+
 	Config config.Root
 
 	// Metadata about the bundle deployment. This is the interface Databricks services
@@ -45,6 +54,9 @@ type Bundle struct {
 	clientOnce sync.Once
 	client     *databricks.WorkspaceClient
 
+	// Files that are synced to the workspace.file_path
+	Files []fileset.File
+
 	// Stores an initialized copy of this bundle's Terraform wrapper.
 	Terraform *tfexec.Terraform
 
@@ -63,33 +75,15 @@ type Bundle struct {
 }
 
 func Load(ctx context.Context, path string) (*Bundle, error) {
-	b := &Bundle{}
-	stat, err := os.Stat(path)
-	if err != nil {
-		return nil, err
+	b := &Bundle{
+		RootPath:   filepath.Clean(path),
+		BundleRoot: vfs.MustNew(path),
 	}
 	configFile, err := config.FileNames.FindInPath(path)
 	if err != nil {
-		_, hasRootEnv := env.Root(ctx)
-		_, hasIncludesEnv := env.Includes(ctx)
-		if hasRootEnv && hasIncludesEnv && stat.IsDir() {
-			log.Debugf(ctx, "No bundle configuration; using bundle root: %s", path)
-			b.Config = config.Root{
-				Path: path,
-				Bundle: config.Bundle{
-					Name: filepath.Base(path),
-				},
-			}
-			return b, nil
-		}
 		return nil, err
 	}
-	log.Debugf(ctx, "Loading bundle configuration from: %s", configFile)
-	root, err := config.Load(configFile)
-	if err != nil {
-		return nil, err
-	}
-	b.Config = *root
+	log.Debugf(ctx, "Found bundle root at %s (file %s)", b.RootPath, configFile)
 	return b, nil
 }
 
@@ -158,7 +152,7 @@ func (b *Bundle) CacheDir(ctx context.Context, paths ...string) (string, error)
 	if !exists || cacheDirName == "" {
 		cacheDirName = filepath.Join(
 			// Anchor at bundle root directory.
-			b.Config.Path,
+			b.RootPath,
 			// Static cache directory.
 			".databricks",
 			"bundle",
@@ -210,7 +204,7 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) {
 	if err != nil {
 		return nil, err
 	}
-	internalDirRel, err := filepath.Rel(b.Config.Path, internalDir)
+	internalDirRel, err := filepath.Rel(b.RootPath, internalDir)
 	if err != nil {
 		return nil, err
 	}
@@ -218,12 +212,12 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) {
 }
 
 func (b *Bundle) GitRepository() (*git.Repository, error) {
-	rootPath, err := folders.FindDirWithLeaf(b.Config.Path, ".git")
+	_, err := vfs.FindLeafInTree(b.BundleRoot, ".git")
 	if err != nil {
 		return nil, fmt.Errorf("unable to locate repository root: %w", err)
 	}
 
-	return git.NewRepository(rootPath)
+	return git.NewRepository(b.BundleRoot)
 }
 
 // AuthEnv returns a map with environment variables and their values
@@ -0,0 +1,41 @@
+package bundle
+
+import (
+	"context"
+
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/libs/vfs"
+	"github.com/databricks/databricks-sdk-go"
+)
+
+type ReadOnlyBundle struct {
+	b *Bundle
+}
+
+func ReadOnly(b *Bundle) ReadOnlyBundle {
+	return ReadOnlyBundle{b: b}
+}
+
+func (r ReadOnlyBundle) Config() config.Root {
+	return r.b.Config
+}
+
+func (r ReadOnlyBundle) RootPath() string {
+	return r.b.RootPath
+}
+
+func (r ReadOnlyBundle) BundleRoot() vfs.Path {
+	return r.b.BundleRoot
+}
+
+func (r ReadOnlyBundle) WorkspaceClient() *databricks.WorkspaceClient {
+	return r.b.WorkspaceClient()
+}
+
+func (r ReadOnlyBundle) CacheDir(ctx context.Context, paths ...string) (string, error) {
+	return r.b.CacheDir(ctx, paths...)
+}
+
+func (r ReadOnlyBundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) {
+	return r.b.GetSyncIncludePatterns(ctx)
+}
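ReadOnlyBundle is a value type that exposes only getters over *Bundle, so code paths that should not mutate the bundle can be made to accept it instead of the mutable struct. The pattern in miniature (hypothetical types, not the CLI's):

package main

import "fmt"

type bundle struct{ rootPath string }

// readOnlyBundle wraps the mutable struct and exposes getters only.
type readOnlyBundle struct{ b *bundle }

func readOnly(b *bundle) readOnlyBundle { return readOnlyBundle{b: b} }

func (r readOnlyBundle) RootPath() string { return r.b.rootPath }

func inspect(r readOnlyBundle) {
	// From another package, r.b would be unexported: callers can read
	// through the getters but cannot reassign fields on the bundle.
	fmt.Println(r.RootPath())
}

func main() {
	inspect(readOnly(&bundle{rootPath: "/tmp/bundle"}))
}

Compared to passing *Bundle with a "please don't mutate" convention, the wrapper turns the convention into a compile-time guarantee.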
@@ -2,25 +2,28 @@ package bundle
 
 import (
 	"context"
+	"errors"
+	"io/fs"
 	"os"
 	"path/filepath"
 	"testing"
 
 	"github.com/databricks/cli/bundle/env"
+	"github.com/databricks/cli/internal/testutil"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
 func TestLoadNotExists(t *testing.T) {
 	b, err := Load(context.Background(), "/doesntexist")
-	assert.True(t, os.IsNotExist(err))
+	assert.True(t, errors.Is(err, fs.ErrNotExist))
 	assert.Nil(t, b)
 }
 
 func TestLoadExists(t *testing.T) {
 	b, err := Load(context.Background(), "./tests/basic")
-	require.Nil(t, err)
-	assert.Equal(t, "basic", b.Config.Bundle.Name)
+	assert.NoError(t, err)
+	assert.NotNil(t, b)
 }
 
 func TestBundleCacheDir(t *testing.T) {
@@ -76,7 +79,7 @@ func TestBundleMustLoadSuccess(t *testing.T) {
 	t.Setenv(env.RootVariable, "./tests/basic")
 	b, err := MustLoad(context.Background())
 	require.NoError(t, err)
-	assert.Equal(t, "tests/basic", filepath.ToSlash(b.Config.Path))
+	assert.Equal(t, "tests/basic", filepath.ToSlash(b.RootPath))
 }
 
 func TestBundleMustLoadFailureWithEnv(t *testing.T) {
@@ -86,7 +89,7 @@ func TestBundleMustLoadFailureWithEnv(t *testing.T) {
 }
 
 func TestBundleMustLoadFailureIfNotFound(t *testing.T) {
-	chdir(t, t.TempDir())
+	testutil.Chdir(t, t.TempDir())
 	_, err := MustLoad(context.Background())
 	require.Error(t, err, "unable to find bundle root")
 }
@@ -95,7 +98,7 @@ func TestBundleTryLoadSuccess(t *testing.T) {
 	t.Setenv(env.RootVariable, "./tests/basic")
 	b, err := TryLoad(context.Background())
 	require.NoError(t, err)
-	assert.Equal(t, "tests/basic", filepath.ToSlash(b.Config.Path))
+	assert.Equal(t, "tests/basic", filepath.ToSlash(b.RootPath))
 }
 
 func TestBundleTryLoadFailureWithEnv(t *testing.T) {
@@ -105,7 +108,7 @@ func TestBundleTryLoadFailureWithEnv(t *testing.T) {
 }
 
 func TestBundleTryLoadOkIfNotFound(t *testing.T) {
-	chdir(t, t.TempDir())
+	testutil.Chdir(t, t.TempDir())
 	b, err := TryLoad(context.Background())
 	assert.NoError(t, err)
 	assert.Nil(t, b)
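The switch from os.IsNotExist to errors.Is(err, fs.ErrNotExist) in TestLoadNotExists matters once errors get wrapped: os.IsNotExist does not unwrap fmt.Errorf-wrapped errors, while errors.Is walks the whole chain, so the check keeps working if Load ever wraps the underlying *fs.PathError. For example:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Stat("/doesntexist")
	wrapped := fmt.Errorf("loading bundle: %w", err)

	fmt.Println(os.IsNotExist(wrapped))             // false — the wrapper hides it
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true — the chain is unwrapped
}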
@@ -3,18 +3,16 @@ package config
 import (
 	"context"
 	"fmt"
-	"path"
 
 	"github.com/databricks/cli/bundle/config/paths"
 	"github.com/databricks/cli/libs/exec"
-	"github.com/databricks/databricks-sdk-go/service/compute"
 )
 
 type Artifacts map[string]*Artifact
 
-func (artifacts Artifacts) SetConfigFilePath(path string) {
+func (artifacts Artifacts) ConfigureConfigFilePath() {
 	for _, artifact := range artifacts {
-		artifact.ConfigFilePath = path
+		artifact.ConfigureConfigFilePath()
 	}
 }
 
@@ -23,9 +21,8 @@ type ArtifactType string
 const ArtifactPythonWheel ArtifactType = `whl`
 
 type ArtifactFile struct {
 	Source string `json:"source"`
-	RemotePath string `json:"-" bundle:"readonly"`
-	Libraries []*compute.Library `json:"-" bundle:"readonly"`
+	RemotePath string `json:"remote_path" bundle:"readonly"`
 }
 
 // Artifact defines a single local code artifact that can be
@@ -65,36 +62,3 @@ func (a *Artifact) Build(ctx context.Context) ([]byte, error) {
 	}
 	return e.Exec(ctx, a.BuildCommand)
 }
-
-func (a *Artifact) NormalisePaths() {
-	for _, f := range a.Files {
-		// If no libraries attached, nothing to normalise, skipping
-		if f.Libraries == nil {
-			continue
-		}
-
-		wsfsBase := "/Workspace"
-		remotePath := path.Join(wsfsBase, f.RemotePath)
-		for i := range f.Libraries {
-			lib := f.Libraries[i]
-			if lib.Whl != "" {
-				lib.Whl = remotePath
-				continue
-			}
-			if lib.Jar != "" {
-				lib.Jar = remotePath
-				continue
-			}
-		}
-
-	}
-}
-
-// This function determines if artifact files needs to be uploaded.
-// During the bundle processing we analyse which library uses which artifact file.
-// If artifact file is used as a library, we store the reference to this library in artifact file Libraries field.
-// If artifact file has libraries it's been used in, it means than we need to upload this file.
-// Otherwise this artifact file is not used and we skip uploading
-func (af *ArtifactFile) NeedsUpload() bool {
-	return af.Libraries != nil
-}
@@ -25,9 +25,6 @@ type Bundle struct {
 	// For example, where to find the binary, which version to use, etc.
 	Terraform *Terraform `json:"terraform,omitempty" bundle:"readonly"`
 
-	// Lock configures locking behavior on deployment.
-	Lock Lock `json:"lock" bundle:"readonly"`
-
 	// Force-override Git branch validation.
 	Force bool `json:"force,omitempty" bundle:"readonly"`
 
@@ -43,4 +40,10 @@ type Bundle struct {
 
 	// Overrides the compute used for jobs and other supported assets.
 	ComputeID string `json:"compute_id,omitempty"`
+
+	// Deployment section specifies deployment related configuration for bundle
+	Deployment Deployment `json:"deployment,omitempty"`
+
+	// Databricks CLI version constraints required to run the bundle.
+	DatabricksCliVersion string `json:"databricks_cli_version,omitempty"`
 }
@@ -0,0 +1,10 @@
+package config
+
+type Deployment struct {
+	// FailOnActiveRuns specifies whether to fail the deployment if there are
+	// running jobs or pipelines in the workspace. Defaults to false.
+	FailOnActiveRuns bool `json:"fail_on_active_runs,omitempty"`
+
+	// Lock configures locking behavior on deployment.
+	Lock Lock `json:"lock"`
+}
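A quick sanity check of how the new Deployment struct's JSON tags serialize (a local mirror with a simplified Lock type, not the CLI's actual structs):

package main

import (
	"encoding/json"
	"fmt"
)

type Lock struct {
	Enabled bool `json:"enabled,omitempty"`
}

type Deployment struct {
	FailOnActiveRuns bool `json:"fail_on_active_runs,omitempty"`
	Lock             Lock `json:"lock"`
}

func main() {
	out, _ := json.Marshal(Deployment{FailOnActiveRuns: true})
	// "lock" has no omitempty, so it always appears, even when zero-valued.
	fmt.Println(string(out)) // {"fail_on_active_runs":true,"lock":{}}
}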
@@ -10,6 +10,35 @@ type Experimental struct {
 	// In this case the configured wheel task will be deployed as a notebook task which install defined wheel in runtime and executes it.
 	// For more details see https://github.com/databricks/cli/pull/797 and https://github.com/databricks/cli/pull/635
 	PythonWheelWrapper bool `json:"python_wheel_wrapper,omitempty"`
+
+	// Enable legacy run_as behavior. That is:
+	// - Set the run_as identity as the owner of any pipelines in the bundle.
+	// - Do not error in the presence of resources that do not support run_as.
+	//   As of April 2024 this includes pipelines and model serving endpoints.
+	//
+	// This mode of run_as requires the deploying user to be a workspace and metastore
+	// admin. Use of this flag is not recommend for new bundles, and it is only provided
+	// to unblock customers that are stuck due to breaking changes in the run_as behavior
+	// made in https://github.com/databricks/cli/pull/1233. This flag might
+	// be removed in the future once we have a proper workaround like allowing IS_OWNER
+	// as a top-level permission in the DAB.
+	UseLegacyRunAs bool `json:"use_legacy_run_as,omitempty"`
+
+	// PyDABs determines whether to load the 'databricks-pydabs' package.
+	//
+	// PyDABs allows to define bundle configuration using Python.
+	PyDABs PyDABs `json:"pydabs,omitempty"`
+}
+
+type PyDABs struct {
+	// Enabled is a flag to enable the feature.
+	Enabled bool `json:"enabled,omitempty"`
+
+	// VEnvPath is path to the virtual environment.
+	//
+	// Required if PyDABs is enabled. PyDABs will load the code in the specified
+	// environment.
+	VEnvPath string `json:"venv_path,omitempty"`
 }
 
 type Command string
@@ -17,12 +17,12 @@ func ConvertJobToValue(job *jobs.Job) (dyn.Value, error) {
 		for _, task := range job.Settings.Tasks {
 			v, err := convertTaskToValue(task, taskOrder)
 			if err != nil {
-				return dyn.NilValue, err
+				return dyn.InvalidValue, err
 			}
 			tasks = append(tasks, v)
 		}
 		// We're using location lines to define the order of keys in exported YAML.
-		value["tasks"] = dyn.NewValue(tasks, dyn.Location{Line: jobOrder.Get("tasks")})
+		value["tasks"] = dyn.NewValue(tasks, []dyn.Location{{Line: jobOrder.Get("tasks")}})
 	}
 
 	return yamlsaver.ConvertToMapValue(job.Settings, jobOrder, []string{"format", "new_cluster", "existing_cluster_id"}, value)
@@ -9,8 +9,8 @@ type Git struct {
 	BundleRootPath string `json:"bundle_root_path,omitempty" bundle:"readonly"`
 
 	// Inferred is set to true if the Git details were inferred and weren't set explicitly
-	Inferred bool `json:"-" bundle:"readonly"`
+	Inferred bool `json:"inferred,omitempty" bundle:"readonly"`
 
 	// The actual branch according to Git (may be different from the configured branch)
-	ActualBranch string `json:"-" bundle:"readonly"`
+	ActualBranch string `json:"actual_branch,omitempty" bundle:"readonly"`
 }
@@ -1,254 +0,0 @@
-package interpolation
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"reflect"
-	"regexp"
-	"sort"
-	"strings"
-
-	"slices"
-
-	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/config/variable"
-	"golang.org/x/exp/maps"
-)
-
-const Delimiter = "."
-
-// must start with alphabet, support hyphens and underscores in middle but must end with character
-var re = regexp.MustCompile(`\$\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\}`)
-
-type stringField struct {
-	path string
-
-	getter
-	setter
-}
-
-func newStringField(path string, g getter, s setter) *stringField {
-	return &stringField{
-		path: path,
-
-		getter: g,
-		setter: s,
-	}
-}
-
-func (s *stringField) dependsOn() []string {
-	var out []string
-	m := re.FindAllStringSubmatch(s.Get(), -1)
-	for i := range m {
-		out = append(out, m[i][1])
-	}
-	return out
-}
-
-func (s *stringField) interpolate(fns []LookupFunction, lookup map[string]string) {
-	out := re.ReplaceAllStringFunc(s.Get(), func(s string) string {
-		// Turn the whole match into the submatch.
-		match := re.FindStringSubmatch(s)
-		for _, fn := range fns {
-			v, err := fn(match[1], lookup)
-			if errors.Is(err, ErrSkipInterpolation) {
-				continue
-			}
-			if err != nil {
-				panic(err)
-			}
-			return v
-		}
-
-		// No substitution.
-		return s
-	})
-
-	s.Set(out)
-}
-
-type accumulator struct {
-	// all string fields in the bundle config
-	strings map[string]*stringField
-
-	// contains path -> resolved_string mapping for string fields in the config
-	// The resolved strings will NOT contain any variable references that could
-	// have been resolved, however there might still be references that cannot
-	// be resolved
-	memo map[string]string
-}
-
-// jsonFieldName returns the name in a field's `json` tag.
-// Returns the empty string if it isn't set.
-func jsonFieldName(sf reflect.StructField) string {
-	tag, ok := sf.Tag.Lookup("json")
-	if !ok {
-		return ""
-	}
-	parts := strings.Split(tag, ",")
-	if parts[0] == "-" {
-		return ""
-	}
-	return parts[0]
-}
-
-func (a *accumulator) walkStruct(scope []string, rv reflect.Value) {
-	num := rv.NumField()
-	for i := 0; i < num; i++ {
-		sf := rv.Type().Field(i)
-		f := rv.Field(i)
-
-		// Walk field with the same scope for anonymous (embedded) fields.
-		if sf.Anonymous {
-			a.walk(scope, f, anySetter{f})
-			continue
-		}
-
-		// Skip unnamed fields.
-		fieldName := jsonFieldName(rv.Type().Field(i))
-		if fieldName == "" {
-			continue
-		}
-
-		a.walk(append(scope, fieldName), f, anySetter{f})
-	}
-}
-
-func (a *accumulator) walk(scope []string, rv reflect.Value, s setter) {
-	// Dereference pointer.
-	if rv.Type().Kind() == reflect.Pointer {
-		// Skip nil pointers.
-		if rv.IsNil() {
-			return
-		}
-		rv = rv.Elem()
-		s = anySetter{rv}
-	}
-
-	switch rv.Type().Kind() {
-	case reflect.String:
-		path := strings.Join(scope, Delimiter)
-		a.strings[path] = newStringField(path, anyGetter{rv}, s)
-
-		// register alias for variable value. `var.foo` would be the alias for
-		// `variables.foo.value`
-		if len(scope) == 3 && scope[0] == "variables" && scope[2] == "value" {
-			aliasPath := strings.Join([]string{variable.VariableReferencePrefix, scope[1]}, Delimiter)
-			a.strings[aliasPath] = a.strings[path]
-		}
-	case reflect.Struct:
-		a.walkStruct(scope, rv)
-	case reflect.Map:
-		if rv.Type().Key().Kind() != reflect.String {
-			panic("only support string keys in map")
-		}
-		keys := rv.MapKeys()
-		for _, key := range keys {
-			a.walk(append(scope, key.String()), rv.MapIndex(key), mapSetter{rv, key})
-		}
-	case reflect.Slice:
-		n := rv.Len()
-		name := scope[len(scope)-1]
-		base := scope[:len(scope)-1]
-		for i := 0; i < n; i++ {
-			element := rv.Index(i)
-			a.walk(append(base, fmt.Sprintf("%s[%d]", name, i)), element, anySetter{element})
-		}
-	}
-}
-
-// walk and gather all string fields in the config
-func (a *accumulator) start(v any) {
-	rv := reflect.ValueOf(v)
-	if rv.Type().Kind() != reflect.Pointer {
-		panic("expect pointer")
-	}
-	rv = rv.Elem()
-	if rv.Type().Kind() != reflect.Struct {
-		panic("expect struct")
-	}
-
-	a.strings = make(map[string]*stringField)
-	a.memo = make(map[string]string)
-	a.walk([]string{}, rv, nilSetter{})
-}
-
-// recursively interpolate variables in a depth first manner
-func (a *accumulator) Resolve(path string, seenPaths []string, fns ...LookupFunction) error {
-	// return early if the path is already resolved
-	if _, ok := a.memo[path]; ok {
-		return nil
-	}
-
-	// fetch the string node to resolve
-	field, ok := a.strings[path]
-	if !ok {
-		return fmt.Errorf("no value found for interpolation reference: ${%s}", path)
-	}
-
-	// return early if the string field has no variables to interpolate
-	if len(field.dependsOn()) == 0 {
-		a.memo[path] = field.Get()
-		return nil
-	}
-
-	// resolve all variables refered in the root string field
-	for _, childFieldPath := range field.dependsOn() {
-		// error if there is a loop in variable interpolation
-		if slices.Contains(seenPaths, childFieldPath) {
-			return fmt.Errorf("cycle detected in field resolution: %s", strings.Join(append(seenPaths, childFieldPath), " -> "))
-		}
-
-		// recursive resolve variables in the child fields
-		err := a.Resolve(childFieldPath, append(seenPaths, childFieldPath), fns...)
-		if err != nil {
-			return err
-		}
-	}
-
-	// interpolate root string once all variable references in it have been resolved
-	field.interpolate(fns, a.memo)
-
-	// record interpolated string in memo
-	a.memo[path] = field.Get()
-	return nil
-}
-
-// Interpolate all string fields in the config
-func (a *accumulator) expand(fns ...LookupFunction) error {
-	// sorting paths for stable order of iteration
-	paths := maps.Keys(a.strings)
-	sort.Strings(paths)
-
-	// iterate over paths for all strings fields in the config
-	for _, path := range paths {
-		err := a.Resolve(path, []string{path}, fns...)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-type interpolate struct {
-	fns []LookupFunction
-}
-
-func (m *interpolate) expand(v any) error {
-	a := accumulator{}
-	a.start(v)
-	return a.expand(m.fns...)
-}
-
-func Interpolate(fns ...LookupFunction) bundle.Mutator {
-	return &interpolate{fns: fns}
-}
-
-func (m *interpolate) Name() string {
-	return "Interpolate"
-}
-
-func (m *interpolate) Apply(_ context.Context, b *bundle.Bundle) error {
-	return m.expand(&b.Config)
-}
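For reference, the interpolation syntax the deleted code enforced can be exercised directly with its regex (copied verbatim from the file above): segments must start with a letter and may only use single hyphens or underscores between alphanumerics.

package main

import (
	"fmt"
	"regexp"
)

var re = regexp.MustCompile(`\$\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\}`)

func main() {
	for _, s := range []string{
		"${hello_world.world_world}", // valid
		"${hello-world.world-world}", // valid
		"${0helloworld.world-world}", // invalid: segment starts with a digit
		"${helloworld.world-world-}", // invalid: segment ends with a hyphen
	} {
		m := re.FindStringSubmatch(s)
		fmt.Printf("%-32s matched=%v\n", s, len(m) > 0)
	}
}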
@@ -1,251 +0,0 @@
-package interpolation
-
-import (
-	"testing"
-
-	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/bundle/config/variable"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-type nest struct {
-	X string `json:"x"`
-	Y *string `json:"y"`
-	Z map[string]string `json:"z"`
-}
-
-type foo struct {
-	A string `json:"a"`
-	B string `json:"b"`
-	C string `json:"c"`
-
-	// Pointer field
-	D *string `json:"d"`
-
-	// Struct field
-	E nest `json:"e"`
-
-	// Map field
-	F map[string]string `json:"f"`
-}
-
-func expand(v any) error {
-	a := accumulator{}
-	a.start(v)
-	return a.expand(DefaultLookup)
-}
-
-func TestInterpolationVariables(t *testing.T) {
-	f := foo{
-		A: "a",
-		B: "${a}",
-		C: "${a}",
-	}
-
-	err := expand(&f)
-	require.NoError(t, err)
-
-	assert.Equal(t, "a", f.A)
-	assert.Equal(t, "a", f.B)
-	assert.Equal(t, "a", f.C)
-}
-
-func TestInterpolationVariablesSpecialChars(t *testing.T) {
-	type bar struct {
-		A string `json:"a-b"`
-		B string `json:"b_c"`
-		C string `json:"c-_a"`
-	}
-	f := bar{
-		A: "a",
-		B: "${a-b}",
-		C: "${a-b}",
-	}
-
-	err := expand(&f)
-	require.NoError(t, err)
-
-	assert.Equal(t, "a", f.A)
-	assert.Equal(t, "a", f.B)
-	assert.Equal(t, "a", f.C)
-}
-
-func TestInterpolationValidMatches(t *testing.T) {
-	expectedMatches := map[string]string{
-		"${hello_world.world_world}": "hello_world.world_world",
-		"${helloworld.world-world}": "helloworld.world-world",
-		"${hello-world.world-world}": "hello-world.world-world",
-	}
-	for interpolationStr, expectedMatch := range expectedMatches {
-		match := re.FindStringSubmatch(interpolationStr)
-		assert.True(t, len(match) > 0,
-			"Failed to match %s and find %s", interpolationStr, expectedMatch)
-		assert.Equal(t, expectedMatch, match[1],
-			"Failed to match the exact pattern %s and find %s", interpolationStr, expectedMatch)
-	}
-}
-
-func TestInterpolationInvalidMatches(t *testing.T) {
-	invalidMatches := []string{
-		"${hello_world-.world_world}", // the first segment ending must not end with hyphen (-)
-		"${hello_world-_.world_world}", // the first segment ending must not end with underscore (_)
-		"${helloworld.world-world-}", // second segment must not end with hyphen (-)
-		"${helloworld-.world-world}", // first segment must not end with hyphen (-)
-		"${helloworld.-world-world}", // second segment must not start with hyphen (-)
-		"${-hello-world.-world-world-}", // must not start or end with hyphen (-)
-		"${_-_._-_.id}", // cannot use _- in sequence
-		"${0helloworld.world-world}", // interpolated first section shouldn't start with number
-		"${helloworld.9world-world}", // interpolated second section shouldn't start with number
-		"${a-a.a-_a-a.id}", // fails because of -_ in the second segment
-		"${a-a.a--a-a.id}", // fails because of -- in the second segment
-	}
-	for _, invalidMatch := range invalidMatches {
-		match := re.FindStringSubmatch(invalidMatch)
-		assert.True(t, len(match) == 0, "Should be invalid interpolation: %s", invalidMatch)
-	}
-}
-
-func TestInterpolationWithPointers(t *testing.T) {
-	fd := "${a}"
-	f := foo{
-		A: "a",
-		D: &fd,
-	}
-
-	err := expand(&f)
-	require.NoError(t, err)
-
-	assert.Equal(t, "a", f.A)
-	assert.Equal(t, "a", *f.D)
-}
-
-func TestInterpolationWithStruct(t *testing.T) {
-	fy := "${e.x}"
-	f := foo{
-		A: "${e.x}",
-		E: nest{
-			X: "x",
-			Y: &fy,
-		},
-	}
-
-	err := expand(&f)
-	require.NoError(t, err)
-
-	assert.Equal(t, "x", f.A)
-	assert.Equal(t, "x", f.E.X)
-	assert.Equal(t, "x", *f.E.Y)
-}
-
-func TestInterpolationWithMap(t *testing.T) {
-	f := foo{
-		A: "${f.a}",
-		F: map[string]string{
-			"a": "a",
-			"b": "${f.a}",
-		},
-	}
-
-	err := expand(&f)
-	require.NoError(t, err)
-
-	assert.Equal(t, "a", f.A)
-	assert.Equal(t, "a", f.F["a"])
-	assert.Equal(t, "a", f.F["b"])
-}
-
-func TestInterpolationWithResursiveVariableReferences(t *testing.T) {
-	f := foo{
-		A: "a",
-		B: "(${a})",
-		C: "${a} ${b}",
-	}
-
-	err := expand(&f)
-	require.NoError(t, err)
-
-	assert.Equal(t, "a", f.A)
-	assert.Equal(t, "(a)", f.B)
-	assert.Equal(t, "a (a)", f.C)
-}
-
-func TestInterpolationVariableLoopError(t *testing.T) {
-	d := "${b}"
-	f := foo{
-		A: "a",
-		B: "${c}",
-		C: "${d}",
-		D: &d,
-	}
-
-	err := expand(&f)
-	assert.ErrorContains(t, err, "cycle detected in field resolution: b -> c -> d -> b")
-}
-
-func TestInterpolationForVariables(t *testing.T) {
-	foo := "abc"
-	bar := "${var.foo} def"
-	apple := "${var.foo} ${var.bar}"
-	config := config.Root{
-		Variables: map[string]*variable.Variable{
-			"foo": {
-				Value: &foo,
-			},
-			"bar": {
-				Value: &bar,
-			},
-			"apple": {
-				Value: &apple,
-			},
-		},
-		Bundle: config.Bundle{
-			Name: "${var.apple} ${var.foo}",
-		},
-	}
-
-	err := expand(&config)
-	assert.NoError(t, err)
-	assert.Equal(t, "abc", *(config.Variables["foo"].Value))
-	assert.Equal(t, "abc def", *(config.Variables["bar"].Value))
-	assert.Equal(t, "abc abc def", *(config.Variables["apple"].Value))
-	assert.Equal(t, "abc abc def abc", config.Bundle.Name)
-}
-
-func TestInterpolationLoopForVariables(t *testing.T) {
-	foo := "${var.bar}"
-	bar := "${var.foo}"
-	config := config.Root{
-		Variables: map[string]*variable.Variable{
-			"foo": {
-				Value: &foo,
-			},
-			"bar": {
-				Value: &bar,
-			},
-		},
-		Bundle: config.Bundle{
-			Name: "${var.foo}",
-		},
-	}
-
-	err := expand(&config)
-	assert.ErrorContains(t, err, "cycle detected in field resolution: bundle.name -> var.foo -> var.bar -> var.foo")
-}
-
-func TestInterpolationInvalidVariableReference(t *testing.T) {
-	foo := "abc"
-	config := config.Root{
-		Variables: map[string]*variable.Variable{
-			"foo": {
-				Value: &foo,
-			},
-		},
-		Bundle: config.Bundle{
-			Name: "${vars.foo}",
-		},
-	}
-
-	err := expand(&config)
-	assert.ErrorContains(t, err, "no value found for interpolation reference: ${vars.foo}")
-}
@@ -1,51 +0,0 @@
-package interpolation
-
-import (
-	"errors"
-	"fmt"
-	"slices"
-	"strings"
-)
-
-// LookupFunction returns the value to rewrite a path expression to.
-type LookupFunction func(path string, depends map[string]string) (string, error)
-
-// ErrSkipInterpolation can be used to fall through from [LookupFunction].
-var ErrSkipInterpolation = errors.New("skip interpolation")
-
-// DefaultLookup looks up the specified path in the map.
-// It returns an error if it doesn't exist.
-func DefaultLookup(path string, lookup map[string]string) (string, error) {
-	v, ok := lookup[path]
-	if !ok {
-		return "", fmt.Errorf("expected to find value for path: %s", path)
-	}
-	return v, nil
-}
-
-func pathPrefixMatches(prefix []string, path string) bool {
-	parts := strings.Split(path, Delimiter)
-	return len(parts) >= len(prefix) && slices.Compare(prefix, parts[0:len(prefix)]) == 0
-}
-
-// ExcludeLookupsInPath is a lookup function that skips lookups for the specified path.
-func ExcludeLookupsInPath(exclude ...string) LookupFunction {
-	return func(path string, lookup map[string]string) (string, error) {
-		if pathPrefixMatches(exclude, path) {
-			return "", ErrSkipInterpolation
-		}
-
-		return DefaultLookup(path, lookup)
-	}
-}
-
-// IncludeLookupsInPath is a lookup function that limits lookups to the specified path.
-func IncludeLookupsInPath(include ...string) LookupFunction {
-	return func(path string, lookup map[string]string) (string, error) {
-		if !pathPrefixMatches(include, path) {
-			return "", ErrSkipInterpolation
-		}
-
-		return DefaultLookup(path, lookup)
-	}
-}
@@ -1,81 +0,0 @@
-package interpolation
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-type interpolationFixture struct {
-	A map[string]string `json:"a"`
-	B map[string]string `json:"b"`
-	C map[string]string `json:"c"`
-}
-
-func fixture() interpolationFixture {
-	return interpolationFixture{
-		A: map[string]string{
-			"x": "1",
-		},
-		B: map[string]string{
-			"x": "2",
-		},
-		C: map[string]string{
-			"ax": "${a.x}",
-			"bx": "${b.x}",
-		},
-	}
-}
-
-func TestExcludePath(t *testing.T) {
-	tmp := fixture()
-	m := interpolate{
-		fns: []LookupFunction{
-			ExcludeLookupsInPath("a"),
-		},
-	}
-
-	err := m.expand(&tmp)
-	require.NoError(t, err)
-
-	assert.Equal(t, "1", tmp.A["x"])
-	assert.Equal(t, "2", tmp.B["x"])
-	assert.Equal(t, "${a.x}", tmp.C["ax"])
-	assert.Equal(t, "2", tmp.C["bx"])
-}
-
-func TestIncludePath(t *testing.T) {
-	tmp := fixture()
-	m := interpolate{
-		fns: []LookupFunction{
-			IncludeLookupsInPath("a"),
-		},
-	}
-
-	err := m.expand(&tmp)
-	require.NoError(t, err)
-
-	assert.Equal(t, "1", tmp.A["x"])
-	assert.Equal(t, "2", tmp.B["x"])
-	assert.Equal(t, "1", tmp.C["ax"])
-	assert.Equal(t, "${b.x}", tmp.C["bx"])
-}
-
-func TestIncludePathMultiple(t *testing.T) {
-	tmp := fixture()
-	m := interpolate{
-		fns: []LookupFunction{
-			IncludeLookupsInPath("a"),
-			IncludeLookupsInPath("b"),
-		},
-	}
-
-	err := m.expand(&tmp)
-	require.NoError(t, err)
-
-	assert.Equal(t, "1", tmp.A["x"])
-	assert.Equal(t, "2", tmp.B["x"])
-	assert.Equal(t, "1", tmp.C["ax"])
-	assert.Equal(t, "2", tmp.C["bx"])
-}
@@ -1,48 +0,0 @@
-package interpolation
-
-import "reflect"
-
-// String values in maps are not addressable and therefore not settable
-// through Go's reflection mechanism. This interface solves this limitation
-// by wrapping the setter differently for addressable values and map values.
-type setter interface {
-	Set(string)
-}
-
-type nilSetter struct{}
-
-func (nilSetter) Set(_ string) {
-	panic("nil setter")
-}
-
-type anySetter struct {
-	rv reflect.Value
-}
-
-func (s anySetter) Set(str string) {
-	s.rv.SetString(str)
-}
-
-type mapSetter struct {
-	// map[string]string
-	m reflect.Value
-
-	// key
-	k reflect.Value
-}
-
-func (s mapSetter) Set(str string) {
-	s.m.SetMapIndex(s.k, reflect.ValueOf(str))
-}
-
-type getter interface {
-	Get() string
-}
-
-type anyGetter struct {
-	rv reflect.Value
-}
-
-func (g anyGetter) Get() string {
-	return g.rv.String()
-}
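The setters deleted above exist because Go's reflect package cannot address individual map elements. A minimal standalone sketch (independent of this repository) of the limitation and the SetMapIndex escape hatch the mapSetter wrapper encapsulated:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]string{"k": "old"}
	mv := reflect.ValueOf(m)

	// A value fetched via MapIndex is not addressable; SetString would panic.
	fmt.Println(mv.MapIndex(reflect.ValueOf("k")).CanSet()) // false

	// The only way to write a map element through reflection is SetMapIndex.
	mv.SetMapIndex(reflect.ValueOf("k"), reflect.ValueOf("new"))
	fmt.Println(m["k"]) // new

	// By contrast, a pointed-to struct field is addressable and settable.
	s := struct{ F string }{}
	reflect.ValueOf(&s).Elem().FieldByName("F").SetString("x")
	fmt.Println(s.F) // x
}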
@@ -0,0 +1,36 @@
+package loader
+
+import (
+	"context"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/libs/diag"
+)
+
+type entryPoint struct{}
+
+// EntryPoint loads the entry point configuration.
+func EntryPoint() bundle.Mutator {
+	return &entryPoint{}
+}
+
+func (m *entryPoint) Name() string {
+	return "EntryPoint"
+}
+
+func (m *entryPoint) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
+	path, err := config.FileNames.FindInPath(b.RootPath)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	this, diags := config.Load(path)
+	if diags.HasError() {
+		return diags
+	}
+	err = b.Config.Merge(this)
+	if err != nil {
+		diags = diags.Extend(diag.FromErr(err))
+	}
+	return diags
+}
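EntryPoint above resolves the bundle's root configuration file through config.FileNames.FindInPath. A standalone sketch of that kind of candidate-name probe; findConfigFile and the candidate list are illustrative stand-ins, not the CLI's actual implementation:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// findConfigFile probes candidate file names in order and returns the
// first that exists, mimicking the FindInPath-style lookup used above.
// The candidate names here are assumptions for the sketch.
func findConfigFile(root string, names []string) (string, error) {
	for _, name := range names {
		path := filepath.Join(root, name)
		if _, err := os.Stat(path); err == nil {
			return path, nil
		}
	}
	return "", fmt.Errorf("no bundle configuration file found in %s", root)
}

func main() {
	dir, _ := os.MkdirTemp("", "bundle")
	defer os.RemoveAll(dir)
	os.WriteFile(filepath.Join(dir, "databricks.yml"), []byte("bundle:\n  name: demo\n"), 0o644)

	path, err := findConfigFile(dir, []string{"databricks.yml", "bundle.yml"})
	fmt.Println(path, err)
}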
@@ -0,0 +1,26 @@
+package loader_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config/loader"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestEntryPointNoRootPath(t *testing.T) {
+	b := &bundle.Bundle{}
+	diags := bundle.Apply(context.Background(), b, loader.EntryPoint())
+	require.Error(t, diags.Error())
+}
+
+func TestEntryPoint(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: "testdata",
+	}
+	diags := bundle.Apply(context.Background(), b, loader.EntryPoint())
+	require.NoError(t, diags.Error())
+	assert.Equal(t, "loader_test", b.Config.Bundle.Name)
+}
@@ -1,4 +1,4 @@
-package mutator
+package loader

 import (
 	"context"
@@ -6,6 +6,7 @@ import (

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/libs/diag"
 )

 type processInclude struct {
@@ -25,10 +26,14 @@ func (m *processInclude) Name() string {
 	return fmt.Sprintf("ProcessInclude(%s)", m.relPath)
 }

-func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) error {
-	this, err := config.Load(m.fullPath)
-	if err != nil {
-		return err
+func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
+	this, diags := config.Load(m.fullPath)
+	if diags.HasError() {
+		return diags
 	}
-	return b.Config.Merge(this)
+	err := b.Config.Merge(this)
+	if err != nil {
+		diags = diags.Extend(diag.FromErr(err))
+	}
+	return diags
 }
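The signature change from error to diag.Diagnostics recurs throughout this commit: load problems are accumulated, and a later merge failure is appended to them instead of replacing them. A simplified standalone model of that accumulation; this Diagnostics type is a hypothetical stand-in for the real one in libs/diag:

package main

import (
	"errors"
	"fmt"
)

// Diagnostics is a simplified stand-in: a list of problems that can be
// extended, rather than a single error returned at first failure.
type Diagnostics []error

func (ds Diagnostics) Extend(other Diagnostics) Diagnostics { return append(ds, other...) }

func (ds Diagnostics) Error() error {
	if len(ds) == 0 {
		return nil
	}
	return errors.Join(ds...)
}

func FromErr(err error) Diagnostics {
	if err == nil {
		return nil
	}
	return Diagnostics{err}
}

func main() {
	var diags Diagnostics
	diags = diags.Extend(FromErr(errors.New("issue reported during load")))
	diags = diags.Extend(FromErr(errors.New("merge failed")))
	fmt.Println(diags.Error()) // both issues are reported together
}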
@@ -1,38 +1,35 @@
-package mutator_test
+package loader_test

 import (
 	"context"
-	"fmt"
-	"os"
 	"path/filepath"
 	"testing"

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/databricks/cli/bundle/config/loader"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )

 func TestProcessInclude(t *testing.T) {
 	b := &bundle.Bundle{
+		RootPath: "testdata",
 		Config: config.Root{
-			Path: t.TempDir(),
 			Workspace: config.Workspace{
 				Host: "foo",
 			},
 		},
 	}

-	relPath := "./file.yml"
-	fullPath := filepath.Join(b.Config.Path, relPath)
-	f, err := os.Create(fullPath)
-	require.NoError(t, err)
-	fmt.Fprint(f, "workspace:\n host: bar\n")
-	f.Close()
+	m := loader.ProcessInclude(filepath.Join(b.RootPath, "host.yml"), "host.yml")
+	assert.Equal(t, "ProcessInclude(host.yml)", m.Name())

+	// Assert the host value prior to applying the mutator
 	assert.Equal(t, "foo", b.Config.Workspace.Host)
-	err = bundle.Apply(context.Background(), b, mutator.ProcessInclude(fullPath, relPath))
-	require.NoError(t, err)
+
+	// Apply the mutator and assert that the host value has been updated
+	diags := bundle.Apply(context.Background(), b, m)
+	require.NoError(t, diags.Error())
 	assert.Equal(t, "bar", b.Config.Workspace.Host)
 }
@@ -1,27 +1,16 @@
-package mutator
+package loader

 import (
 	"context"
-	"fmt"
-	"os"
 	"path/filepath"
 	"slices"
 	"strings"

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/bundle/env"
+	"github.com/databricks/cli/libs/diag"
 )

-// Get extra include paths from environment variable
-func getExtraIncludePaths(ctx context.Context) []string {
-	value, exists := env.Includes(ctx)
-	if !exists {
-		return nil
-	}
-	return strings.Split(value, string(os.PathListSeparator))
-}
-
 type processRootIncludes struct{}

 // ProcessRootIncludes expands the patterns in the configuration's include list
@@ -34,7 +23,7 @@ func (m *processRootIncludes) Name() string {
 	return "ProcessRootIncludes"
 }

-func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	var out []bundle.Mutator

 	// Map with files we've already seen to avoid loading them twice.
@@ -48,45 +37,33 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error
 	// This is stored in the bundle configuration for observability.
 	var files []string

-	// Converts extra include paths from environment variable to relative paths
-	for _, extraIncludePath := range getExtraIncludePaths(ctx) {
-		if filepath.IsAbs(extraIncludePath) {
-			rel, err := filepath.Rel(b.Config.Path, extraIncludePath)
-			if err != nil {
-				return fmt.Errorf("unable to include file '%s': %w", extraIncludePath, err)
-			}
-			extraIncludePath = rel
-		}
-		b.Config.Include = append(b.Config.Include, extraIncludePath)
-	}
-
 	// For each glob, find all files to load.
 	// Ordering of the list of globs is maintained in the output.
 	// For matches that appear in multiple globs, only the first is kept.
 	for _, entry := range b.Config.Include {
 		// Include paths must be relative.
 		if filepath.IsAbs(entry) {
-			return fmt.Errorf("%s: includes must be relative paths", entry)
+			return diag.Errorf("%s: includes must be relative paths", entry)
 		}

 		// Anchor includes to the bundle root path.
-		matches, err := filepath.Glob(filepath.Join(b.Config.Path, entry))
+		matches, err := filepath.Glob(filepath.Join(b.RootPath, entry))
 		if err != nil {
-			return err
+			return diag.FromErr(err)
 		}

 		// If the entry is not a glob pattern and no matches found,
 		// return an error because the file defined is not found
 		if len(matches) == 0 && !strings.ContainsAny(entry, "*?[") {
-			return fmt.Errorf("%s defined in 'include' section does not match any files", entry)
+			return diag.Errorf("%s defined in 'include' section does not match any files", entry)
 		}

 		// Filter matches to ones we haven't seen yet.
 		var includes []string
 		for _, match := range matches {
-			rel, err := filepath.Rel(b.Config.Path, match)
+			rel, err := filepath.Rel(b.RootPath, match)
 			if err != nil {
-				return err
+				return diag.FromErr(err)
 			}
 			if _, ok := seen[rel]; ok {
 				continue
@@ -99,7 +76,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error
 		slices.Sort(includes)
 		files = append(files, includes...)
 		for _, include := range includes {
-			out = append(out, ProcessInclude(filepath.Join(b.Config.Path, include), include))
+			out = append(out, ProcessInclude(filepath.Join(b.RootPath, include), include))
 		}
 	}
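ProcessRootIncludes preserves glob ordering while deduplicating files matched by more than one pattern. A standalone sketch of that ordering and dedup logic, using only the standard library:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"slices"
)

func main() {
	dir, _ := os.MkdirTemp("", "includes")
	defer os.RemoveAll(dir)
	for _, name := range []string{"a.yml", "b.yml"} {
		os.WriteFile(filepath.Join(dir, name), nil, 0o644)
	}

	seen := map[string]bool{}
	var files []string
	// A later glob cannot re-add a file already matched by an earlier glob.
	for _, pattern := range []string{"*.yml", "a*.yml"} {
		matches, err := filepath.Glob(filepath.Join(dir, pattern))
		if err != nil {
			panic(err)
		}
		var includes []string
		for _, match := range matches {
			rel, _ := filepath.Rel(dir, match)
			if seen[rel] {
				continue
			}
			seen[rel] = true
			includes = append(includes, rel)
		}
		slices.Sort(includes)
		files = append(files, includes...)
	}
	fmt.Println(files) // [a.yml b.yml]
}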
@@ -0,0 +1,113 @@
+package loader_test
+
+import (
+	"context"
+	"runtime"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/loader"
+	"github.com/databricks/cli/internal/testutil"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestProcessRootIncludesEmpty(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: ".",
+	}
+	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
+	require.NoError(t, diags.Error())
+}
+
+func TestProcessRootIncludesAbs(t *testing.T) {
+	// remove this once equivalent tests for windows have been set up
+	// or this test has been fixed for windows
+	// date: 28 Nov 2022
+	if runtime.GOOS == "windows" {
+		t.Skip("skipping temperorilty to make windows unit tests green")
+	}
+
+	b := &bundle.Bundle{
+		RootPath: ".",
+		Config: config.Root{
+			Include: []string{
+				"/tmp/*.yml",
+			},
+		},
+	}
+	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
+	require.True(t, diags.HasError())
+	assert.ErrorContains(t, diags.Error(), "must be relative paths")
+}
+
+func TestProcessRootIncludesSingleGlob(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: t.TempDir(),
+		Config: config.Root{
+			Include: []string{
+				"*.yml",
+			},
+		},
+	}
+
+	testutil.Touch(t, b.RootPath, "databricks.yml")
+	testutil.Touch(t, b.RootPath, "a.yml")
+	testutil.Touch(t, b.RootPath, "b.yml")
+
+	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
+	require.NoError(t, diags.Error())
+	assert.Equal(t, []string{"a.yml", "b.yml"}, b.Config.Include)
+}
+
+func TestProcessRootIncludesMultiGlob(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: t.TempDir(),
+		Config: config.Root{
+			Include: []string{
+				"a*.yml",
+				"b*.yml",
+			},
+		},
+	}
+
+	testutil.Touch(t, b.RootPath, "a1.yml")
+	testutil.Touch(t, b.RootPath, "b1.yml")
+
+	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
+	require.NoError(t, diags.Error())
+	assert.Equal(t, []string{"a1.yml", "b1.yml"}, b.Config.Include)
+}
+
+func TestProcessRootIncludesRemoveDups(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: t.TempDir(),
+		Config: config.Root{
+			Include: []string{
+				"*.yml",
+				"*.yml",
+			},
+		},
+	}
+
+	testutil.Touch(t, b.RootPath, "a.yml")
+
+	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
+	require.NoError(t, diags.Error())
+	assert.Equal(t, []string{"a.yml"}, b.Config.Include)
+}
+
+func TestProcessRootIncludesNotExists(t *testing.T) {
+	b := &bundle.Bundle{
+		RootPath: t.TempDir(),
+		Config: config.Root{
+			Include: []string{
+				"notexist.yml",
+			},
+		},
+	}
+	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
+	require.True(t, diags.HasError())
+	assert.ErrorContains(t, diags.Error(), "notexist.yml defined in 'include' section does not match any files")
+}
@@ -0,0 +1,2 @@
+bundle:
+  name: loader_test
@@ -0,0 +1,2 @@
+workspace:
+  host: bar
@@ -1,7 +1,7 @@
 package config

 type Lock struct {
-	// Enabled toggles deployment lock. True by default.
+	// Enabled toggles deployment lock. True by default except in development mode.
 	// Use a pointer value so that only explicitly configured values are set
 	// and we don't merge configuration with zero-initialized values.
 	Enabled *bool `json:"enabled,omitempty"`
@@ -11,9 +11,20 @@ type Lock struct {
 	Force bool `json:"force,omitempty"`
 }

+// IsEnabled checks if the deployment lock is enabled.
 func (lock Lock) IsEnabled() bool {
 	if lock.Enabled != nil {
 		return *lock.Enabled
 	}
 	return true
 }
+
+// IsExplicitlyEnabled checks if the deployment lock is explicitly enabled.
+// Only returns true if locking is explicitly set using a command-line
+// flag or configuration file.
+func (lock Lock) IsExplicitlyEnabled() bool {
+	if lock.Enabled != nil {
+		return *lock.Enabled
+	}
+	return false
+}
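Lock.Enabled uses *bool so that "unset" can be distinguished from "explicitly false", which is why IsEnabled and IsExplicitlyEnabled only differ in their default. A standalone sketch of the three states:

package main

import "fmt"

type Lock struct {
	Enabled *bool
}

// IsEnabled defaults to true when the value is unset.
func (l Lock) IsEnabled() bool {
	if l.Enabled != nil {
		return *l.Enabled
	}
	return true
}

// IsExplicitlyEnabled defaults to false when the value is unset.
func (l Lock) IsExplicitlyEnabled() bool {
	if l.Enabled != nil {
		return *l.Enabled
	}
	return false
}

func main() {
	yes, no := true, false
	for _, l := range []Lock{{}, {Enabled: &yes}, {Enabled: &no}} {
		fmt.Println(l.IsEnabled(), l.IsExplicitlyEnabled())
	}
	// unset:          true  false
	// explicit true:  true  true
	// explicit false: false false
}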
@@ -0,0 +1,50 @@
+package mutator
+
+import (
+	"context"
+	"strings"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/env"
+	"github.com/databricks/cli/libs/filer"
+	"github.com/databricks/cli/libs/vfs"
+)
+
+const envDatabricksRuntimeVersion = "DATABRICKS_RUNTIME_VERSION"
+
+type configureWSFS struct{}
+
+func ConfigureWSFS() bundle.Mutator {
+	return &configureWSFS{}
+}
+
+func (m *configureWSFS) Name() string {
+	return "ConfigureWSFS"
+}
+
+func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	root := b.BundleRoot.Native()
+
+	// The bundle root must be located in /Workspace/
+	if !strings.HasPrefix(root, "/Workspace/") {
+		return nil
+	}
+
+	// The executable must be running on DBR.
+	if _, ok := env.Lookup(ctx, envDatabricksRuntimeVersion); !ok {
+		return nil
+	}
+
+	// If so, swap out vfs.Path instance of the sync root with one that
+	// makes all Workspace File System interactions extension aware.
+	p, err := vfs.NewFilerPath(ctx, root, func(path string) (filer.Filer, error) {
+		return filer.NewWorkspaceFilesExtensionsClient(b.WorkspaceClient(), path)
+	})
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	b.BundleRoot = p
+	return nil
+}
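ConfigureWSFS is a no-op unless two conditions hold at once. A standalone sketch of the same guard structure; it uses os.LookupEnv as a simplification of the context-scoped env.Lookup in the code above:

package main

import (
	"fmt"
	"os"
	"strings"
)

// shouldConfigure mirrors the two early returns above: only act when the
// root lives under /Workspace/ and we are running on Databricks Runtime.
func shouldConfigure(root string) bool {
	if !strings.HasPrefix(root, "/Workspace/") {
		return false
	}
	_, ok := os.LookupEnv("DATABRICKS_RUNTIME_VERSION")
	return ok
}

func main() {
	fmt.Println(shouldConfigure("/Workspace/Users/me/bundle"))
	fmt.Println(shouldConfigure("/tmp/bundle")) // false regardless of env
}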
@@ -0,0 +1,38 @@
+package mutator
+
+import (
+	"context"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+)
+
+type defaultQueueing struct{}
+
+func DefaultQueueing() bundle.Mutator {
+	return &defaultQueueing{}
+}
+
+func (m *defaultQueueing) Name() string {
+	return "DefaultQueueing"
+}
+
+// Enable queueing for jobs by default, following the behavior from API 2.2+.
+// As of 2024-04, we're still using API 2.1 which has queueing disabled by default.
+// This mutator makes sure queueing is enabled by default before we can adopt API 2.2.
+func (m *defaultQueueing) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	r := b.Config.Resources
+	for i := range r.Jobs {
+		if r.Jobs[i].JobSettings == nil {
+			r.Jobs[i].JobSettings = &jobs.JobSettings{}
+		}
+		if r.Jobs[i].Queue != nil {
+			continue
+		}
+		r.Jobs[i].Queue = &jobs.QueueSettings{
+			Enabled: true,
+		}
+	}
+	return nil
+}
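The mutator only fills Queue when it is nil, so an explicit Enabled: false survives, as the tests below verify. A standalone sketch of that fill-only-when-unset defaulting pattern:

package main

import "fmt"

type QueueSettings struct{ Enabled bool }
type Job struct{ Queue *QueueSettings }

func applyDefaultQueueing(jobs map[string]*Job) {
	for _, j := range jobs {
		if j.Queue != nil {
			continue // an explicit setting, even false, is preserved
		}
		j.Queue = &QueueSettings{Enabled: true}
	}
}

func main() {
	jobs := map[string]*Job{
		"unset":    {},
		"disabled": {Queue: &QueueSettings{Enabled: false}},
	}
	applyDefaultQueueing(jobs)
	fmt.Println(jobs["unset"].Queue.Enabled)    // true
	fmt.Println(jobs["disabled"].Queue.Enabled) // false
}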
@@ -0,0 +1,103 @@
+package mutator
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestDefaultQueueing(t *testing.T) {
+	m := DefaultQueueing()
+	assert.IsType(t, &defaultQueueing{}, m)
+}
+
+func TestDefaultQueueingName(t *testing.T) {
+	m := DefaultQueueing()
+	assert.Equal(t, "DefaultQueueing", m.Name())
+}
+
+func TestDefaultQueueingApplyNoJobs(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{},
+		},
+	}
+	d := bundle.Apply(context.Background(), b, DefaultQueueing())
+	assert.Len(t, d, 0)
+	assert.Len(t, b.Config.Resources.Jobs, 0)
+}
+
+func TestDefaultQueueingApplyJobsAlreadyEnabled(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job": {
+						JobSettings: &jobs.JobSettings{
+							Queue: &jobs.QueueSettings{Enabled: true},
+						},
+					},
+				},
+			},
+		},
+	}
+	d := bundle.Apply(context.Background(), b, DefaultQueueing())
+	assert.Len(t, d, 0)
+	assert.True(t, b.Config.Resources.Jobs["job"].Queue.Enabled)
+}
+
+func TestDefaultQueueingApplyEnableQueueing(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job": {
+						JobSettings: &jobs.JobSettings{
+							Name: "job",
+						},
+					},
+				},
+			},
+		},
+	}
+	d := bundle.Apply(context.Background(), b, DefaultQueueing())
+	assert.Len(t, d, 0)
+	assert.NotNil(t, b.Config.Resources.Jobs["job"].Queue)
+	assert.True(t, b.Config.Resources.Jobs["job"].Queue.Enabled)
+}
+
+func TestDefaultQueueingApplyWithMultipleJobs(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job1": {
+						JobSettings: &jobs.JobSettings{
+							Queue: &jobs.QueueSettings{Enabled: false},
+						},
+					},
+					"job2": {
+						JobSettings: &jobs.JobSettings{
+							Name: "job",
+						},
+					},
+					"job3": {
+						JobSettings: &jobs.JobSettings{
+							Queue: &jobs.QueueSettings{Enabled: true},
+						},
+					},
+				},
+			},
+		},
+	}
+	d := bundle.Apply(context.Background(), b, DefaultQueueing())
+	assert.Len(t, d, 0)
+	assert.False(t, b.Config.Resources.Jobs["job1"].Queue.Enabled)
+	assert.True(t, b.Config.Resources.Jobs["job2"].Queue.Enabled)
+	assert.True(t, b.Config.Resources.Jobs["job3"].Queue.Enabled)
+}
@@ -6,6 +6,7 @@ import (

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/libs/diag"
 )

 type defineDefaultTarget struct {
@@ -24,7 +25,7 @@ func (m *defineDefaultTarget) Name() string {
 	return fmt.Sprintf("DefineDefaultTarget(%s)", m.name)
 }

-func (m *defineDefaultTarget) Apply(_ context.Context, b *bundle.Bundle) error {
+func (m *defineDefaultTarget) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
 	// Nothing to do if the configuration has at least 1 target.
 	if len(b.Config.Targets) > 0 {
 		return nil
@@ -13,8 +13,9 @@ import (

 func TestDefaultTarget(t *testing.T) {
 	b := &bundle.Bundle{}
-	err := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget())
+	require.NoError(t, diags.Error())
+
 	env, ok := b.Config.Targets["default"]
 	assert.True(t, ok)
 	assert.Equal(t, &config.Target{}, env)
@@ -28,8 +29,9 @@ func TestDefaultTargetAlreadySpecified(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget())
+	require.NoError(t, diags.Error())
+
 	_, ok := b.Config.Targets["default"]
 	assert.False(t, ok)
 }
@@ -2,10 +2,10 @@ package mutator

 import (
 	"context"
-	"fmt"
 	"path"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 )

 type defineDefaultWorkspacePaths struct{}
@@ -19,10 +19,10 @@ func (m *defineDefaultWorkspacePaths) Name() string {
 	return "DefaultWorkspacePaths"
 }

-func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	root := b.Config.Workspace.RootPath
 	if root == "" {
-		return fmt.Errorf("unable to define default workspace paths: workspace root not defined")
+		return diag.Errorf("unable to define default workspace paths: workspace root not defined")
 	}

 	if b.Config.Workspace.FilePath == "" {
@@ -19,8 +19,8 @@ func TestDefineDefaultWorkspacePaths(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths())
+	require.NoError(t, diags.Error())
 	assert.Equal(t, "/files", b.Config.Workspace.FilePath)
 	assert.Equal(t, "/artifacts", b.Config.Workspace.ArtifactPath)
 	assert.Equal(t, "/state", b.Config.Workspace.StatePath)
@@ -37,8 +37,8 @@ func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths())
+	require.NoError(t, diags.Error())
 	assert.Equal(t, "/foo/bar", b.Config.Workspace.FilePath)
 	assert.Equal(t, "/foo/bar", b.Config.Workspace.ArtifactPath)
 	assert.Equal(t, "/foo/bar", b.Config.Workspace.StatePath)
@@ -5,6 +5,7 @@ import (
 	"fmt"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 )

 type defineDefaultWorkspaceRoot struct{}
@@ -18,17 +19,17 @@ func (m *defineDefaultWorkspaceRoot) Name() string {
 	return "DefineDefaultWorkspaceRoot"
 }

-func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	if b.Config.Workspace.RootPath != "" {
 		return nil
 	}

 	if b.Config.Bundle.Name == "" {
-		return fmt.Errorf("unable to define default workspace root: bundle name not defined")
+		return diag.Errorf("unable to define default workspace root: bundle name not defined")
 	}

 	if b.Config.Bundle.Target == "" {
-		return fmt.Errorf("unable to define default workspace root: bundle target not selected")
+		return diag.Errorf("unable to define default workspace root: bundle target not selected")
 	}

 	b.Config.Workspace.RootPath = fmt.Sprintf(
@@ -20,7 +20,8 @@ func TestDefaultWorkspaceRoot(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspaceRoot())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspaceRoot())
+	require.NoError(t, diags.Error())

 	assert.Equal(t, "~/.bundle/name/environment", b.Config.Workspace.RootPath)
 }
@@ -0,0 +1,66 @@
+package mutator
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+)
+
+type environmentsToTargets struct{}
+
+func EnvironmentsToTargets() bundle.Mutator {
+	return &environmentsToTargets{}
+}
+
+func (m *environmentsToTargets) Name() string {
+	return "EnvironmentsToTargets"
+}
+
+func (m *environmentsToTargets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	// Short circuit if the "environments" key is not set.
+	// This is the common case.
+	if b.Config.Environments == nil {
+		return nil
+	}
+
+	// The "environments" key is set; validate and rewrite it to "targets".
+	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+		environments := v.Get("environments")
+		targets := v.Get("targets")
+
+		// Return an error if both "environments" and "targets" are set.
+		if environments.Kind() != dyn.KindInvalid && targets.Kind() != dyn.KindInvalid {
+			return dyn.InvalidValue, fmt.Errorf(
+				"both 'environments' and 'targets' are specified; only 'targets' should be used: %s",
+				environments.Location().String(),
+			)
+		}
+
+		// Rewrite "environments" to "targets".
+		if environments.Kind() != dyn.KindInvalid && targets.Kind() == dyn.KindInvalid {
+			nv, err := dyn.Set(v, "targets", environments)
+			if err != nil {
+				return dyn.InvalidValue, err
+			}
+			// Drop the "environments" key.
+			return dyn.Walk(nv, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+				switch len(p) {
+				case 0:
+					return v, nil
+				case 1:
+					if p[0] == dyn.Key("environments") {
+						return v, dyn.ErrDrop
+					}
+				}
+				return v, dyn.ErrSkip
+			})
+		}
+
+		return v, nil
+	})
+
+	return diag.FromErr(err)
+}
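At its core the rewrite above is a guarded key rename at the configuration root. A standalone sketch of the same logic on a plain map, without the dyn value tree; renameKey is an illustrative helper, not a CLI function:

package main

import "fmt"

// renameKey moves m[from] to m[to], failing when both are present,
// mirroring the environments -> targets rewrite above.
func renameKey(m map[string]any, from, to string) error {
	fv, fok := m[from]
	_, tok := m[to]
	if !fok {
		return nil // nothing to do
	}
	if tok {
		return fmt.Errorf("both %q and %q are specified; only %q should be used", from, to, to)
	}
	m[to] = fv
	delete(m, from)
	return nil
}

func main() {
	cfg := map[string]any{"environments": map[string]any{"dev": nil}}
	if err := renameKey(cfg, "environments", "targets"); err != nil {
		panic(err)
	}
	fmt.Println(cfg) // map[targets:map[dev:<nil>]]
}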
@@ -0,0 +1,66 @@
+package mutator_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestEnvironmentsToTargetsWithBothDefined(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Environments: map[string]*config.Target{
+				"name": {
+					Mode: config.Development,
+				},
+			},
+			Targets: map[string]*config.Target{
+				"name": {
+					Mode: config.Development,
+				},
+			},
+		},
+	}
+
+	diags := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets())
+	assert.ErrorContains(t, diags.Error(), `both 'environments' and 'targets' are specified;`)
+}
+
+func TestEnvironmentsToTargetsWithEnvironmentsDefined(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Environments: map[string]*config.Target{
+				"name": {
+					Mode: config.Development,
+				},
+			},
+		},
+	}
+
+	diags := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets())
+	require.NoError(t, diags.Error())
+	assert.Len(t, b.Config.Environments, 0)
+	assert.Len(t, b.Config.Targets, 1)
+}
+
+func TestEnvironmentsToTargetsWithTargetsDefined(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Targets: map[string]*config.Target{
+				"name": {
+					Mode: config.Development,
+				},
+			},
+		},
+	}
+
+	diags := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets())
+	require.NoError(t, diags.Error())
+	assert.Len(t, b.Config.Environments, 0)
+	assert.Len(t, b.Config.Targets, 1)
+}
@@ -7,7 +7,8 @@ import (

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/libraries"
-	"github.com/databricks/databricks-sdk-go/service/pipelines"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
 )

 type expandPipelineGlobPaths struct{}
@@ -16,77 +17,96 @@ func ExpandPipelineGlobPaths() bundle.Mutator {
 	return &expandPipelineGlobPaths{}
 }

-func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) error {
-	for key, pipeline := range b.Config.Resources.Pipelines {
-		dir, err := pipeline.ConfigFileDirectory()
-		if err != nil {
-			return fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err)
-		}
-
-		expandedLibraries := make([]pipelines.PipelineLibrary, 0)
-		for i := 0; i < len(pipeline.Libraries); i++ {
-			library := &pipeline.Libraries[i]
-			path := getGlobPatternToExpand(library)
-			if path == "" || !libraries.IsLocalPath(path) {
-				expandedLibraries = append(expandedLibraries, *library)
-				continue
-			}
-
-			matches, err := filepath.Glob(filepath.Join(dir, path))
-			if err != nil {
-				return err
-			}
-
-			if len(matches) == 0 {
-				expandedLibraries = append(expandedLibraries, *library)
-				continue
-			}
-
-			for _, match := range matches {
-				m, err := filepath.Rel(dir, match)
-				if err != nil {
-					return err
-				}
-				expandedLibraries = append(expandedLibraries, cloneWithPath(library, m))
-			}
-		}
-		pipeline.Libraries = expandedLibraries
-	}
-
-	return nil
-}
-
-func getGlobPatternToExpand(library *pipelines.PipelineLibrary) string {
-	if library.File != nil {
-		return library.File.Path
-	}
-
-	if library.Notebook != nil {
-		return library.Notebook.Path
-	}
-
-	return ""
-}
-
-func cloneWithPath(library *pipelines.PipelineLibrary, path string) pipelines.PipelineLibrary {
-	if library.File != nil {
-		return pipelines.PipelineLibrary{
-			File: &pipelines.FileLibrary{
-				Path: path,
-			},
-		}
-	}
-
-	if library.Notebook != nil {
-		return pipelines.PipelineLibrary{
-			Notebook: &pipelines.NotebookLibrary{
-				Path: path,
-			},
-		}
-	}
-
-	return pipelines.PipelineLibrary{}
-}
+func (m *expandPipelineGlobPaths) expandLibrary(v dyn.Value) ([]dyn.Value, error) {
+	// Probe for the path field in the library.
+	for _, p := range []dyn.Path{
+		dyn.NewPath(dyn.Key("notebook"), dyn.Key("path")),
+		dyn.NewPath(dyn.Key("file"), dyn.Key("path")),
+	} {
+		pv, err := dyn.GetByPath(v, p)
+		if dyn.IsNoSuchKeyError(err) {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		// If the path is empty or not a local path, return the original value.
+		path := pv.MustString()
+		if path == "" || !libraries.IsLocalPath(path) {
+			return []dyn.Value{v}, nil
+		}
+
+		dir, err := v.Location().Directory()
+		if err != nil {
+			return nil, err
+		}
+
+		matches, err := filepath.Glob(filepath.Join(dir, path))
+		if err != nil {
+			return nil, err
+		}
+
+		// If there are no matches, return the original value.
+		if len(matches) == 0 {
+			return []dyn.Value{v}, nil
+		}
+
+		// Emit a new value for each match.
+		var ev []dyn.Value
+		for _, match := range matches {
+			m, err := filepath.Rel(dir, match)
+			if err != nil {
+				return nil, err
+			}
+			nv, err := dyn.SetByPath(v, p, dyn.NewValue(m, pv.Locations()))
+			if err != nil {
+				return nil, err
+			}
+			ev = append(ev, nv)
+		}
+
+		return ev, nil
+	}
+
+	// Neither of the library paths were found. This is likely an invalid node,
+	// but it isn't this mutator's job to enforce that. Return the original value.
+	return []dyn.Value{v}, nil
+}
+
+func (m *expandPipelineGlobPaths) expandSequence(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+	s, ok := v.AsSequence()
+	if !ok {
+		return dyn.InvalidValue, fmt.Errorf("expected sequence, got %s", v.Kind())
+	}
+
+	var vs []dyn.Value
+	for _, sv := range s {
+		v, err := m.expandLibrary(sv)
+		if err != nil {
+			return dyn.InvalidValue, err
+		}
+		vs = append(vs, v...)
+	}
+
+	return dyn.NewValue(vs, v.Locations()), nil
+}
+
+func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
+	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+		p := dyn.NewPattern(
+			dyn.Key("resources"),
+			dyn.Key("pipelines"),
+			dyn.AnyKey(),
+			dyn.Key("libraries"),
+		)
+
+		// Visit each pipeline's "libraries" field and expand any glob patterns.
+		return dyn.MapByPattern(v, p, m.expandSequence)
+	})

+	return diag.FromErr(err)
+}

 func (*expandPipelineGlobPaths) Name() string {
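expandLibrary turns one library entry into zero or more entries, keeping the entry verbatim when the pattern matches nothing. A standalone sketch of that one-to-many expansion with the same fallback; expandEntry is an illustrative helper, not part of the CLI:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// expandEntry returns the relative matches for a glob, or the original
// entry when there are none - the same fallback the mutator above uses.
func expandEntry(dir, entry string) ([]string, error) {
	matches, err := filepath.Glob(filepath.Join(dir, entry))
	if err != nil {
		return nil, err
	}
	if len(matches) == 0 {
		return []string{entry}, nil
	}
	var out []string
	for _, m := range matches {
		rel, err := filepath.Rel(dir, m)
		if err != nil {
			return nil, err
		}
		out = append(out, rel)
	}
	return out, nil
}

func main() {
	dir, _ := os.MkdirTemp("", "libs")
	defer os.RemoveAll(dir)
	os.WriteFile(filepath.Join(dir, "a.py"), nil, 0o644)
	os.WriteFile(filepath.Join(dir, "b.py"), nil, 0o644)

	fmt.Println(expandEntry(dir, "*.py"))      // [a.py b.py] <nil>
	fmt.Println(expandEntry(dir, "missing/*")) // [missing/*] <nil>
}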
@@ -8,8 +8,8 @@ import (

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/bundle/config/paths"
 	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/bundle/internal/bundletest"
 	"github.com/databricks/databricks-sdk-go/service/compute"
 	"github.com/databricks/databricks-sdk-go/service/pipelines"
 	"github.com/stretchr/testify/require"
@@ -35,16 +35,17 @@ func TestExpandGlobPathsInPipelines(t *testing.T) {
 	touchEmptyFile(t, filepath.Join(dir, "test1.py"))
 	touchEmptyFile(t, filepath.Join(dir, "test/test2.py"))
 	touchEmptyFile(t, filepath.Join(dir, "test/test3.py"))
+	touchEmptyFile(t, filepath.Join(dir, "relative/test4.py"))
+	touchEmptyFile(t, filepath.Join(dir, "relative/test5.py"))
+	touchEmptyFile(t, filepath.Join(dir, "skip/test6.py"))
+	touchEmptyFile(t, filepath.Join(dir, "skip/test7.py"))

 	b := &bundle.Bundle{
+		RootPath: dir,
 		Config: config.Root{
-			Path: dir,
 			Resources: config.Resources{
 				Pipelines: map[string]*resources.Pipeline{
 					"pipeline": {
-						Paths: paths.Paths{
-							ConfigFilePath: filepath.Join(dir, "resource.yml"),
-						},
 						PipelineSpec: &pipelines.PipelineSpec{
 							Libraries: []pipelines.PipelineLibrary{
 								{
@@ -57,7 +58,13 @@ func TestExpandGlobPathsInPipelines(t *testing.T) {
 								},
 								{
 									File: &pipelines.FileLibrary{
-										Path: "./**/*.py",
+										Path: "./test/*.py",
+									},
+								},
+								{
+									// This value is annotated to be defined in the "./relative" directory.
+									File: &pipelines.FileLibrary{
+										Path: "./*.py",
 									},
 								},
 								{
@@ -98,12 +105,15 @@ func TestExpandGlobPathsInPipelines(t *testing.T) {
 		},
 	}

+	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, "resources.pipelines.pipeline.libraries[3]", filepath.Join(dir, "relative", "resource.yml"))
+
 	m := ExpandPipelineGlobPaths()
-	err := bundle.Apply(context.Background(), b, m)
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, m)
+	require.NoError(t, diags.Error())

 	libraries := b.Config.Resources.Pipelines["pipeline"].Libraries
-	require.Len(t, libraries, 11)
+	require.Len(t, libraries, 13)

 	// Making sure glob patterns are expanded correctly
 	require.True(t, containsNotebook(libraries, filepath.Join("test", "test2.ipynb")))
@@ -111,6 +121,10 @@ func TestExpandGlobPathsInPipelines(t *testing.T) {
 	require.True(t, containsFile(libraries, filepath.Join("test", "test2.py")))
 	require.True(t, containsFile(libraries, filepath.Join("test", "test3.py")))

+	// These patterns are defined relative to "./relative"
+	require.True(t, containsFile(libraries, "test4.py"))
+	require.True(t, containsFile(libraries, "test5.py"))
+
 	// Making sure exact file references work as well
 	require.True(t, containsNotebook(libraries, "test1.ipynb"))
@@ -7,6 +7,7 @@ import (
 	"strings"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 )

 type expandWorkspaceRoot struct{}
@@ -20,15 +21,15 @@ func (m *expandWorkspaceRoot) Name() string {
 	return "ExpandWorkspaceRoot"
 }

-func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	root := b.Config.Workspace.RootPath
 	if root == "" {
-		return fmt.Errorf("unable to expand workspace root: workspace root not defined")
+		return diag.Errorf("unable to expand workspace root: workspace root not defined")
 	}

 	currentUser := b.Config.Workspace.CurrentUser
 	if currentUser == nil || currentUser.UserName == "" {
-		return fmt.Errorf("unable to expand workspace root: current user not set")
+		return diag.Errorf("unable to expand workspace root: current user not set")
 	}

 	if strings.HasPrefix(root, "~/") {
@@ -25,8 +25,8 @@ func TestExpandWorkspaceRoot(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
+	require.NoError(t, diags.Error())
 	assert.Equal(t, "/Users/jane@doe.com/foo", b.Config.Workspace.RootPath)
 }

@@ -43,8 +43,8 @@ func TestExpandWorkspaceRootDoesNothing(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
+	require.NoError(t, diags.Error())
 	assert.Equal(t, "/Users/charly@doe.com/foo", b.Config.Workspace.RootPath)
 }

@@ -60,8 +60,8 @@ func TestExpandWorkspaceRootWithoutRoot(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
-	require.Error(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
+	require.True(t, diags.HasError())
 }

 func TestExpandWorkspaceRootWithoutCurrentUser(t *testing.T) {
@@ -72,6 +72,6 @@ func TestExpandWorkspaceRootWithoutCurrentUser(t *testing.T) {
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
-	require.Error(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
+	require.True(t, diags.HasError())
 }
@@ -1,35 +0,0 @@
-package mutator
-
-import (
-	"context"
-
-	"github.com/databricks/cli/bundle"
-)
-
-type ifMutator struct {
-	condition      func(*bundle.Bundle) bool
-	onTrueMutator  bundle.Mutator
-	onFalseMutator bundle.Mutator
-}
-
-func If(
-	condition func(*bundle.Bundle) bool,
-	onTrueMutator bundle.Mutator,
-	onFalseMutator bundle.Mutator,
-) bundle.Mutator {
-	return &ifMutator{
-		condition, onTrueMutator, onFalseMutator,
-	}
-}
-
-func (m *ifMutator) Apply(ctx context.Context, b *bundle.Bundle) error {
-	if m.condition(b) {
-		return bundle.Apply(ctx, b, m.onTrueMutator)
-	} else {
-		return bundle.Apply(ctx, b, m.onFalseMutator)
-	}
-}
-
-func (m *ifMutator) Name() string {
-	return "If"
-}
@@ -5,6 +5,7 @@ import (

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config/variable"
+	"github.com/databricks/cli/libs/diag"
 )

 type initializeVariables struct{}
@@ -18,7 +19,7 @@ func (m *initializeVariables) Name() string {
 	return "InitializeVariables"
 }

-func (m *initializeVariables) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *initializeVariables) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	vars := b.Config.Variables
 	for k, v := range vars {
 		if v == nil {
@@ -23,8 +23,8 @@ func TestInitializeVariables(t *testing.T) {
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.InitializeVariables())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.InitializeVariables())
+	require.NoError(t, diags.Error())
 	assert.NotNil(t, b.Config.Variables["foo"])
 	assert.NotNil(t, b.Config.Variables["bar"])
 	assert.Equal(t, "This is a description", b.Config.Variables["bar"].Description)
@@ -36,7 +36,7 @@ func TestInitializeVariablesWithoutVariables(t *testing.T) {
 		Variables: nil,
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.InitializeVariables())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.InitializeVariables())
+	require.NoError(t, diags.Error())
 	assert.Nil(t, b.Config.Variables)
 }
@@ -4,6 +4,7 @@ import (
 	"context"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 )

 type initializeWorkspaceClient struct{}
@@ -19,7 +20,7 @@ func (m *initializeWorkspaceClient) Name() string {
 // Apply initializes the workspace client for the bundle. We do this here so
 // downstream calls to b.WorkspaceClient() do not panic if there's an error in the
 // auth configuration.
-func (m *initializeWorkspaceClient) Apply(_ context.Context, b *bundle.Bundle) error {
+func (m *initializeWorkspaceClient) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
 	_, err := b.InitializeWorkspaceClient()
-	return err
+	return diag.FromErr(err)
 }
@@ -5,6 +5,7 @@ import (
 	"path/filepath"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/git"
 	"github.com/databricks/cli/libs/log"
 )
@@ -19,11 +20,11 @@ func (m *loadGitDetails) Name() string {
 	return "LoadGitDetails"
 }

-func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	// Load relevant git repository
-	repo, err := git.NewRepository(b.Config.Path)
+	repo, err := git.NewRepository(b.BundleRoot)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}

 	// Read branch name of current checkout
@@ -55,14 +56,14 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error {
 	}

 	// Compute relative path of the bundle root from the Git repo root.
-	absBundlePath, err := filepath.Abs(b.Config.Path)
+	absBundlePath, err := filepath.Abs(b.RootPath)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}
 	// repo.Root() returns the absolute path of the repo
 	relBundlePath, err := filepath.Rel(repo.Root(), absBundlePath)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}
 	b.Config.Bundle.Git.BundleRootPath = filepath.ToSlash(relBundlePath)
 	return nil
@@ -0,0 +1,45 @@
+package mutator
+
+import (
+	"context"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/cli/libs/dyn/merge"
+)
+
+type mergeJobClusters struct{}
+
+func MergeJobClusters() bundle.Mutator {
+	return &mergeJobClusters{}
+}
+
+func (m *mergeJobClusters) Name() string {
+	return "MergeJobClusters"
+}
+
+func (m *mergeJobClusters) jobClusterKey(v dyn.Value) string {
+	switch v.Kind() {
+	case dyn.KindInvalid, dyn.KindNil:
+		return ""
+	case dyn.KindString:
+		return v.MustString()
+	default:
+		panic("job cluster key must be a string")
+	}
+}
+
+func (m *mergeJobClusters) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+		if v.Kind() == dyn.KindNil {
+			return v, nil
+		}
+
+		return dyn.Map(v, "resources.jobs", dyn.Foreach(func(_ dyn.Path, job dyn.Value) (dyn.Value, error) {
+			return dyn.Map(job, "job_clusters", merge.ElementsByKey("job_cluster_key", m.jobClusterKey))
+		}))
+	})
+
+	return diag.FromErr(err)
+}
@@ -0,0 +1,105 @@
+package mutator_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestMergeJobClusters(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"foo": {
+						JobSettings: &jobs.JobSettings{
+							JobClusters: []jobs.JobCluster{
+								{
+									JobClusterKey: "foo",
+									NewCluster: compute.ClusterSpec{
+										SparkVersion: "13.3.x-scala2.12",
+										NodeTypeId:   "i3.xlarge",
+										NumWorkers:   2,
+									},
+								},
+								{
+									JobClusterKey: "bar",
+									NewCluster: compute.ClusterSpec{
+										SparkVersion: "10.4.x-scala2.12",
+									},
+								},
+								{
+									JobClusterKey: "foo",
+									NewCluster: compute.ClusterSpec{
+										NodeTypeId: "i3.2xlarge",
+										NumWorkers: 4,
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	diags := bundle.Apply(context.Background(), b, mutator.MergeJobClusters())
+	assert.NoError(t, diags.Error())
+
+	j := b.Config.Resources.Jobs["foo"]
+
+	assert.Len(t, j.JobClusters, 2)
+	assert.Equal(t, "foo", j.JobClusters[0].JobClusterKey)
+	assert.Equal(t, "bar", j.JobClusters[1].JobClusterKey)
+
+	// This job cluster was merged with a subsequent one.
+	jc0 := j.JobClusters[0].NewCluster
+	assert.Equal(t, "13.3.x-scala2.12", jc0.SparkVersion)
+	assert.Equal(t, "i3.2xlarge", jc0.NodeTypeId)
+	assert.Equal(t, 4, jc0.NumWorkers)
+
+	// This job cluster was left untouched.
+	jc1 := j.JobClusters[1].NewCluster
+	assert.Equal(t, "10.4.x-scala2.12", jc1.SparkVersion)
+}
+
+func TestMergeJobClustersWithNilKey(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"foo": {
+						JobSettings: &jobs.JobSettings{
+							JobClusters: []jobs.JobCluster{
+								{
+									NewCluster: compute.ClusterSpec{
+										SparkVersion: "13.3.x-scala2.12",
+										NodeTypeId:   "i3.xlarge",
+										NumWorkers:   2,
+									},
+								},
+								{
+									NewCluster: compute.ClusterSpec{
+										NodeTypeId: "i3.2xlarge",
+										NumWorkers: 4,
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	diags := bundle.Apply(context.Background(), b, mutator.MergeJobClusters())
+	assert.NoError(t, diags.Error())
+	assert.Len(t, b.Config.Resources.Jobs["foo"].JobClusters, 1)
+}
@@ -0,0 +1,45 @@
+package mutator
+
+import (
+	"context"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/cli/libs/dyn/merge"
+)
+
+type mergeJobTasks struct{}
+
+func MergeJobTasks() bundle.Mutator {
+	return &mergeJobTasks{}
+}
+
+func (m *mergeJobTasks) Name() string {
+	return "MergeJobTasks"
+}
+
+func (m *mergeJobTasks) taskKeyString(v dyn.Value) string {
+	switch v.Kind() {
+	case dyn.KindInvalid, dyn.KindNil:
+		return ""
+	case dyn.KindString:
+		return v.MustString()
+	default:
+		panic("task key must be a string")
+	}
+}
+
+func (m *mergeJobTasks) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+		if v.Kind() == dyn.KindNil {
+			return v, nil
+		}
+
+		return dyn.Map(v, "resources.jobs", dyn.Foreach(func(_ dyn.Path, job dyn.Value) (dyn.Value, error) {
+			return dyn.Map(job, "tasks", merge.ElementsByKey("task_key", m.taskKeyString))
+		}))
+	})
+
+	return diag.FromErr(err)
+}
@@ -0,0 +1,117 @@
+package mutator_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestMergeJobTasks(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"foo": {
+						JobSettings: &jobs.JobSettings{
+							Tasks: []jobs.Task{
+								{
+									TaskKey: "foo",
+									NewCluster: &compute.ClusterSpec{
+										SparkVersion: "13.3.x-scala2.12",
+										NodeTypeId:   "i3.xlarge",
+										NumWorkers:   2,
+									},
+									Libraries: []compute.Library{
+										{Whl: "package1"},
+									},
+								},
+								{
+									TaskKey: "bar",
+									NewCluster: &compute.ClusterSpec{
+										SparkVersion: "10.4.x-scala2.12",
+									},
+								},
+								{
+									TaskKey: "foo",
+									NewCluster: &compute.ClusterSpec{
+										NodeTypeId: "i3.2xlarge",
+										NumWorkers: 4,
+									},
+									Libraries: []compute.Library{
+										{Pypi: &compute.PythonPyPiLibrary{
+											Package: "package2",
+										}},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	diags := bundle.Apply(context.Background(), b, mutator.MergeJobTasks())
+	assert.NoError(t, diags.Error())
+
+	j := b.Config.Resources.Jobs["foo"]
+
+	assert.Len(t, j.Tasks, 2)
+	assert.Equal(t, "foo", j.Tasks[0].TaskKey)
+	assert.Equal(t, "bar", j.Tasks[1].TaskKey)
+
+	// This task was merged with a subsequent one.
+	task0 := j.Tasks[0]
+	cluster := task0.NewCluster
+	assert.Equal(t, "13.3.x-scala2.12", cluster.SparkVersion)
+	assert.Equal(t, "i3.2xlarge", cluster.NodeTypeId)
+	assert.Equal(t, 4, cluster.NumWorkers)
+	assert.Len(t, task0.Libraries, 2)
+	assert.Equal(t, task0.Libraries[0].Whl, "package1")
+	assert.Equal(t, task0.Libraries[1].Pypi.Package, "package2")
+
+	// This task was left untouched.
+	task1 := j.Tasks[1].NewCluster
+	assert.Equal(t, "10.4.x-scala2.12", task1.SparkVersion)
+}
+
+func TestMergeJobTasksWithNilKey(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"foo": {
+						JobSettings: &jobs.JobSettings{
+							Tasks: []jobs.Task{
+								{
+									NewCluster: &compute.ClusterSpec{
+										SparkVersion: "13.3.x-scala2.12",
+										NodeTypeId:   "i3.xlarge",
+										NumWorkers:   2,
+									},
+								},
+								{
+									NewCluster: &compute.ClusterSpec{
+										NodeTypeId: "i3.2xlarge",
+										NumWorkers: 4,
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	diags := bundle.Apply(context.Background(), b, mutator.MergeJobTasks())
+	assert.NoError(t, diags.Error())
+	assert.Len(t, b.Config.Resources.Jobs["foo"].Tasks, 1)
+}
@@ -0,0 +1,48 @@
+package mutator
+
+import (
+	"context"
+	"strings"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/cli/libs/dyn/merge"
+)
+
+type mergePipelineClusters struct{}
+
+func MergePipelineClusters() bundle.Mutator {
+	return &mergePipelineClusters{}
+}
+
+func (m *mergePipelineClusters) Name() string {
+	return "MergePipelineClusters"
+}
+
+func (m *mergePipelineClusters) clusterLabel(v dyn.Value) string {
+	switch v.Kind() {
+	case dyn.KindInvalid, dyn.KindNil:
+		// Note: the cluster label is optional and defaults to 'default'.
+		// We therefore ALSO merge all clusters without a label.
+		return "default"
+	case dyn.KindString:
+		return strings.ToLower(v.MustString())
+	default:
+		panic("cluster label must be a string")
+	}
+}
+
+func (m *mergePipelineClusters) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+		if v.Kind() == dyn.KindNil {
+			return v, nil
+		}
+
+		return dyn.Map(v, "resources.pipelines", dyn.Foreach(func(_ dyn.Path, pipeline dyn.Value) (dyn.Value, error) {
+			return dyn.Map(pipeline, "clusters", merge.ElementsByKey("label", m.clusterLabel))
+		}))
+	})
+
+	return diag.FromErr(err)
+}
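
The label normalization is the interesting part here: a missing label and any casing of `default` all map to the same merge key. A small stdlib-only sketch of that lookup-key rule, with `normalizeLabel` as an illustrative stand-in for `clusterLabel`:

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeLabel mirrors the lookup key used when merging pipeline clusters:
// a missing label is treated as "default" and comparison is case-insensitive,
// so {}, {Label: "default"}, and {Label: "DEFAULT"} all merge together.
func normalizeLabel(label string) string {
	if label == "" {
		return "default"
	}
	return strings.ToLower(label)
}

func main() {
	for _, l := range []string{"", "default", "DEFAULT", "maintenance"} {
		fmt.Printf("%q -> %q\n", l, normalizeLabel(l))
	}
}
```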
@@ -0,0 +1,125 @@
+package mutator_test
+
+import (
+	"context"
+	"strings"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/databricks-sdk-go/service/pipelines"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestMergePipelineClusters(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Pipelines: map[string]*resources.Pipeline{
+					"foo": {
+						PipelineSpec: &pipelines.PipelineSpec{
+							Clusters: []pipelines.PipelineCluster{
+								{
+									NodeTypeId: "i3.xlarge",
+									NumWorkers: 2,
+									PolicyId:   "1234",
+								},
+								{
+									Label:      "maintenance",
+									NodeTypeId: "i3.2xlarge",
+								},
+								{
+									NodeTypeId: "i3.2xlarge",
+									NumWorkers: 4,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	diags := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters())
+	assert.NoError(t, diags.Error())
+
+	p := b.Config.Resources.Pipelines["foo"]
+
+	assert.Len(t, p.Clusters, 2)
+	assert.Equal(t, "default", p.Clusters[0].Label)
+	assert.Equal(t, "maintenance", p.Clusters[1].Label)
+
+	// The default cluster was merged with a subsequent one.
+	pc0 := p.Clusters[0]
+	assert.Equal(t, "i3.2xlarge", pc0.NodeTypeId)
+	assert.Equal(t, 4, pc0.NumWorkers)
+	assert.Equal(t, "1234", pc0.PolicyId)
+
+	// The maintenance cluster was left untouched.
+	pc1 := p.Clusters[1]
+	assert.Equal(t, "i3.2xlarge", pc1.NodeTypeId)
+}
+
+func TestMergePipelineClustersCaseInsensitive(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Pipelines: map[string]*resources.Pipeline{
+					"foo": {
+						PipelineSpec: &pipelines.PipelineSpec{
+							Clusters: []pipelines.PipelineCluster{
+								{
+									Label:      "default",
+									NumWorkers: 2,
+								},
+								{
+									Label:      "DEFAULT",
+									NumWorkers: 4,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	diags := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters())
+	assert.NoError(t, diags.Error())
+
+	p := b.Config.Resources.Pipelines["foo"]
+	assert.Len(t, p.Clusters, 1)
+
+	// The default cluster was merged with a subsequent one.
+	pc0 := p.Clusters[0]
+	assert.Equal(t, "default", strings.ToLower(pc0.Label))
+	assert.Equal(t, 4, pc0.NumWorkers)
+}
+
+func TestMergePipelineClustersNilPipelines(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Pipelines: nil,
+			},
+		},
+	}
+
+	diags := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters())
+	assert.NoError(t, diags.Error())
+}
+
+func TestMergePipelineClustersEmptyPipelines(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Pipelines: map[string]*resources.Pipeline{},
+			},
+		},
+	}
+
+	diags := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters())
+	assert.NoError(t, diags.Error())
+}
@@ -3,19 +3,28 @@ package mutator
 
 import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/loader"
+	pythonmutator "github.com/databricks/cli/bundle/config/mutator/python"
 	"github.com/databricks/cli/bundle/scripts"
 )
 
 func DefaultMutators() []bundle.Mutator {
 	return []bundle.Mutator{
+		loader.EntryPoint(),
+
+		// Execute preinit script before processing includes.
+		// It needs to be done before processing configuration files to allow
+		// the script to modify the configuration or add its own configuration files.
 		scripts.Execute(config.ScriptPreInit),
-		ProcessRootIncludes(),
+		loader.ProcessRootIncludes(),
+
+		// Verify that the CLI version is within the specified range.
+		VerifyCliVersion(),
+
+		EnvironmentsToTargets(),
 		InitializeVariables(),
 		DefineDefaultTarget(),
 		LoadGitDetails(),
+		pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseLoad),
 	}
 }
 
-func DefaultMutatorsForTarget(env string) []bundle.Mutator {
-	return append(DefaultMutators(), SelectTarget(env))
-}
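
Ordering in `DefaultMutators` is significant: the preinit script must run before includes are processed so it can still influence which configuration files are loaded. A stripped-down sketch of the mutator-chain pattern (names like `State`, `Mutator`, and `applyAll` are illustrative stand-ins, not the CLI's actual `bundle.Apply` machinery):

```go
package main

import (
	"context"
	"fmt"
)

// State stands in for the bundle being mutated.
type State struct{ Include []string }

// Mutator inspects or rewrites shared state; real mutators return
// diagnostics rather than a plain error.
type Mutator interface {
	Name() string
	Apply(ctx context.Context, s *State) error
}

type entryPoint struct{}

func (entryPoint) Name() string { return "EntryPoint" }
func (entryPoint) Apply(ctx context.Context, s *State) error {
	s.Include = append(s.Include, "databricks.yml")
	return nil
}

// applyAll runs mutators in order and stops at the first failure, which is
// why the ordering above matters.
func applyAll(ctx context.Context, s *State, ms ...Mutator) error {
	for _, m := range ms {
		if err := m.Apply(ctx, s); err != nil {
			return fmt.Errorf("mutator %s failed: %w", m.Name(), err)
		}
	}
	return nil
}

func main() {
	s := &State{}
	if err := applyAll(context.Background(), s, entryPoint{}); err != nil {
		panic(err)
	}
	fmt.Println(s.Include) // [databricks.yml]
}
```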
@@ -4,11 +4,12 @@ import (
 	"context"
 
 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 )
 
 type noop struct{}
 
-func (*noop) Apply(context.Context, *bundle.Bundle) error {
+func (*noop) Apply(context.Context, *bundle.Bundle) diag.Diagnostics {
 	return nil
 }
 
@@ -2,11 +2,11 @@ package mutator
 
 import (
 	"context"
-	"fmt"
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/env"
 )
 
@@ -22,20 +22,25 @@ func (m *overrideCompute) Name() string {
 
 func overrideJobCompute(j *resources.Job, compute string) {
 	for i := range j.Tasks {
-		task := &j.Tasks[i]
-		if task.NewCluster != nil || task.ExistingClusterId != "" || task.ComputeKey != "" || task.JobClusterKey != "" {
+		var task = &j.Tasks[i]
+
+		if task.ForEachTask != nil {
+			task = &task.ForEachTask.Task
+		}
+
+		if task.NewCluster != nil || task.ExistingClusterId != "" || task.EnvironmentKey != "" || task.JobClusterKey != "" {
 			task.NewCluster = nil
 			task.JobClusterKey = ""
-			task.ComputeKey = ""
+			task.EnvironmentKey = ""
 			task.ExistingClusterId = compute
 		}
 	}
 }
 
-func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	if b.Config.Bundle.Mode != config.Development {
 		if b.Config.Bundle.ComputeID != "" {
-			return fmt.Errorf("cannot override compute for a target that does not use 'mode: development'")
+			return diag.Errorf("cannot override compute for a target that does not use 'mode: development'")
 		}
 		return nil
 	}
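
The `ForEachTask` handling above works by re-aiming the working pointer at the nested task before mutating, so the override lands on the task that actually runs on a cluster. A simplified sketch of that pointer redirection (the structs are stand-ins for the jobs SDK types, and the guard on pre-existing compute settings is omitted):

```go
package main

import "fmt"

// Task and ForEachTask are simplified stand-ins for the jobs SDK structs.
type Task struct {
	ExistingClusterId string
	ForEachTask       *ForEachTask
}

type ForEachTask struct {
	Task Task // nested task that actually runs on a cluster
}

func overrideCompute(tasks []Task, clusterID string) {
	for i := range tasks {
		var task = &tasks[i]
		if task.ForEachTask != nil {
			task = &task.ForEachTask.Task // mutate the inner task instead
		}
		task.ExistingClusterId = clusterID
	}
}

func main() {
	tasks := []Task{{}, {ForEachTask: &ForEachTask{}}}
	overrideCompute(tasks, "cluster-123")
	fmt.Println(tasks[0].ExistingClusterId)                  // cluster-123
	fmt.Println(tasks[1].ForEachTask.Task.ExistingClusterId) // cluster-123
}
```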
@@ -28,13 +28,15 @@ func TestOverrideDevelopment(t *testing.T) {
 				Name: "job1",
 				Tasks: []jobs.Task{
 					{
-						NewCluster: &compute.ClusterSpec{},
+						NewCluster: &compute.ClusterSpec{
+							SparkVersion: "14.2.x-scala2.12",
+						},
 					},
 					{
 						ExistingClusterId: "cluster2",
 					},
 					{
-						ComputeKey: "compute_key",
+						EnvironmentKey: "environment_key",
 					},
 					{
 						JobClusterKey: "cluster_key",
@@ -47,8 +49,8 @@ func TestOverrideDevelopment(t *testing.T) {
 	}
 
 	m := mutator.OverrideCompute()
-	err := bundle.Apply(context.Background(), b, m)
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, m)
+	require.NoError(t, diags.Error())
 	assert.Nil(t, b.Config.Resources.Jobs["job1"].Tasks[0].NewCluster)
 	assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
 	assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
@@ -56,7 +58,7 @@ func TestOverrideDevelopment(t *testing.T) {
 	assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[3].ExistingClusterId)
 
 	assert.Nil(t, b.Config.Resources.Jobs["job1"].Tasks[0].NewCluster)
-	assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[2].ComputeKey)
+	assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[2].EnvironmentKey)
 	assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[3].JobClusterKey)
 }
 
@@ -83,8 +85,8 @@ func TestOverrideDevelopmentEnv(t *testing.T) {
 	}
 
 	m := mutator.OverrideCompute()
-	err := bundle.Apply(context.Background(), b, m)
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, m)
+	require.NoError(t, diags.Error())
 	assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
 }
 
@@ -108,11 +110,36 @@ func TestOverridePipelineTask(t *testing.T) {
 	}
 
 	m := mutator.OverrideCompute()
-	err := bundle.Apply(context.Background(), b, m)
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, m)
+	require.NoError(t, diags.Error())
 	assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
 }
 
+func TestOverrideForEachTask(t *testing.T) {
+	t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job1": {JobSettings: &jobs.JobSettings{
+						Name: "job1",
+						Tasks: []jobs.Task{
+							{
+								ForEachTask: &jobs.ForEachTask{},
+							},
+						},
+					}},
+				},
+			},
+		},
+	}
+
+	m := mutator.OverrideCompute()
+	diags := bundle.Apply(context.Background(), b, m)
+	require.NoError(t, diags.Error())
+	assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ForEachTask.Task)
+}
+
 func TestOverrideProduction(t *testing.T) {
 	b := &bundle.Bundle{
 		Config: config.Root{
@@ -138,8 +165,8 @@ func TestOverrideProduction(t *testing.T) {
 	}
 
 	m := mutator.OverrideCompute()
-	err := bundle.Apply(context.Background(), b, m)
-	require.Error(t, err)
+	diags := bundle.Apply(context.Background(), b, m)
+	require.True(t, diags.HasError())
 }
 
 func TestOverrideProductionEnv(t *testing.T) {
@@ -165,6 +192,6 @@ func TestOverrideProductionEnv(t *testing.T) {
 	}
 
 	m := mutator.OverrideCompute()
-	err := bundle.Apply(context.Background(), b, m)
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, m)
+	require.NoError(t, diags.Error())
 }
 
@@ -6,6 +6,7 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/libs/auth"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/tags"
 )
 
@@ -20,7 +21,7 @@ func (m *populateCurrentUser) Name() string {
 	return "PopulateCurrentUser"
 }
 
-func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	if b.Config.Workspace.CurrentUser != nil {
 		return nil
 	}
@@ -28,7 +29,7 @@ func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error
 	w := b.WorkspaceClient()
 	me, err := w.CurrentUser.Me(ctx)
 	if err != nil {
-		return err
+		return diag.FromErr(err)
 	}
 
 	b.Config.Workspace.CurrentUser = &config.User{
@@ -1,167 +0,0 @@
-package mutator_test
-
-import (
-	"context"
-	"os"
-	"path"
-	"path/filepath"
-	"runtime"
-	"strings"
-	"testing"
-
-	"github.com/databricks/cli/bundle"
-	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/bundle/config/mutator"
-	"github.com/databricks/cli/bundle/env"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-func touch(t *testing.T, path, file string) {
-	f, err := os.Create(filepath.Join(path, file))
-	require.NoError(t, err)
-	f.Close()
-}
-
-func TestProcessRootIncludesEmpty(t *testing.T) {
-	b := &bundle.Bundle{
-		Config: config.Root{
-			Path: ".",
-		},
-	}
-	err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
-	require.NoError(t, err)
-}
-
-func TestProcessRootIncludesAbs(t *testing.T) {
-	// remove this once equivalent tests for windows have been set up
-	// or this test has been fixed for windows
-	// date: 28 Nov 2022
-	if runtime.GOOS == "windows" {
-		t.Skip("skipping temperorilty to make windows unit tests green")
-	}
-
-	b := &bundle.Bundle{
-		Config: config.Root{
-			Path: ".",
-			Include: []string{
-				"/tmp/*.yml",
-			},
-		},
-	}
-	err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
-	require.Error(t, err)
-	assert.Contains(t, err.Error(), "must be relative paths")
-}
-
-func TestProcessRootIncludesSingleGlob(t *testing.T) {
-	b := &bundle.Bundle{
-		Config: config.Root{
-			Path: t.TempDir(),
-			Include: []string{
-				"*.yml",
-			},
-		},
-	}
-
-	touch(t, b.Config.Path, "databricks.yml")
-	touch(t, b.Config.Path, "a.yml")
-	touch(t, b.Config.Path, "b.yml")
-
-	err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
-	require.NoError(t, err)
-
-	assert.Equal(t, []string{"a.yml", "b.yml"}, b.Config.Include)
-}
-
-func TestProcessRootIncludesMultiGlob(t *testing.T) {
-	b := &bundle.Bundle{
-		Config: config.Root{
-			Path: t.TempDir(),
-			Include: []string{
-				"a*.yml",
-				"b*.yml",
-			},
-		},
-	}
-
-	touch(t, b.Config.Path, "a1.yml")
-	touch(t, b.Config.Path, "b1.yml")
-
-	err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
-	require.NoError(t, err)
-
-	assert.Equal(t, []string{"a1.yml", "b1.yml"}, b.Config.Include)
-}
-
-func TestProcessRootIncludesRemoveDups(t *testing.T) {
-	b := &bundle.Bundle{
-		Config: config.Root{
-			Path: t.TempDir(),
-			Include: []string{
-				"*.yml",
-				"*.yml",
-			},
-		},
-	}
-
-	touch(t, b.Config.Path, "a.yml")
-
-	err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
-	require.NoError(t, err)
-	assert.Equal(t, []string{"a.yml"}, b.Config.Include)
-}
-
-func TestProcessRootIncludesNotExists(t *testing.T) {
-	b := &bundle.Bundle{
-		Config: config.Root{
-			Path: t.TempDir(),
-			Include: []string{
-				"notexist.yml",
-			},
-		},
-	}
-	err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
-	require.Error(t, err)
-	assert.Contains(t, err.Error(), "notexist.yml defined in 'include' section does not match any files")
-}
-
-func TestProcessRootIncludesExtrasFromEnvVar(t *testing.T) {
-	rootPath := t.TempDir()
-	testYamlName := "extra_include_path.yml"
-	touch(t, rootPath, testYamlName)
-	t.Setenv(env.IncludesVariable, path.Join(rootPath, testYamlName))
-
-	b := &bundle.Bundle{
-		Config: config.Root{
-			Path: rootPath,
-		},
-	}
-
-	err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
-	require.NoError(t, err)
-	assert.Contains(t, b.Config.Include, testYamlName)
-}
-
-func TestProcessRootIncludesDedupExtrasFromEnvVar(t *testing.T) {
-	rootPath := t.TempDir()
-	testYamlName := "extra_include_path.yml"
-	touch(t, rootPath, testYamlName)
-	t.Setenv(env.IncludesVariable, strings.Join(
-		[]string{
-			path.Join(rootPath, testYamlName),
-			path.Join(rootPath, testYamlName),
-		},
-		string(os.PathListSeparator),
-	))
-
-	b := &bundle.Bundle{
-		Config: config.Root{
-			Path: rootPath,
-		},
-	}
-
-	err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
-	require.NoError(t, err)
-	assert.Equal(t, []string{testYamlName}, b.Config.Include)
-}
@@ -2,14 +2,15 @@ package mutator
 
 import (
 	"context"
-	"fmt"
 	"path"
 	"strings"
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/libs/auth"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
+	"github.com/databricks/databricks-sdk-go/service/catalog"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/databricks/databricks-sdk-go/service/ml"
 )
@@ -29,9 +30,14 @@ func (m *processTargetMode) Name() string {
 // Mark all resources as being for 'development' purposes, i.e.
 // changing their name, adding tags, and (in the future)
 // marking them as 'hidden' in the UI.
-func transformDevelopmentMode(b *bundle.Bundle) error {
-	r := b.Config.Resources
+func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	if !b.Config.Bundle.Deployment.Lock.IsExplicitlyEnabled() {
+		log.Infof(ctx, "Development mode: disabling deployment lock since bundle.deployment.lock.enabled is not set to true")
+		disabled := false
+		b.Config.Bundle.Deployment.Lock.Enabled = &disabled
+	}
+
+	r := b.Config.Resources
 	shortName := b.Config.Workspace.CurrentUser.ShortName
 	prefix := "[dev " + shortName + "] "
 
@@ -70,7 +76,7 @@ func transformDevelopmentMode(b *bundle.Bundle) error {
 
 	for i := range r.Models {
 		r.Models[i].Name = prefix + r.Models[i].Name
-		r.Models[i].Tags = append(r.Models[i].Tags, ml.ModelTag{Key: "dev", Value: ""})
+		r.Models[i].Tags = append(r.Models[i].Tags, ml.ModelTag{Key: "dev", Value: tagValue})
 	}
 
 	for i := range r.Experiments {
@@ -97,12 +103,21 @@ func transformDevelopmentMode(b *bundle.Bundle) error {
 		// (registered models in Unity Catalog don't yet support tags)
 	}
 
+	for i := range r.QualityMonitors {
+		// Remove all schedules from monitors, since they don't support pausing/unpausing.
+		// Quality monitors might support the "pause" property in the future, so at the
+		// CLI level we do respect that property if it is set to "unpaused".
+		if r.QualityMonitors[i].Schedule != nil && r.QualityMonitors[i].Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused {
+			r.QualityMonitors[i].Schedule = nil
+		}
+	}
+
 	return nil
 }
 
-func validateDevelopmentMode(b *bundle.Bundle) error {
+func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
 	if path := findNonUserPath(b); path != "" {
-		return fmt.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path)
+		return diag.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path)
 	}
 	return nil
 }
@@ -125,7 +140,7 @@ func findNonUserPath(b *bundle.Bundle) string {
 	return ""
 }
 
-func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUsed bool) error {
+func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUsed bool) diag.Diagnostics {
 	if b.Config.Bundle.Git.Inferred {
 		env := b.Config.Bundle.Target
 		log.Warnf(ctx, "target with 'mode: production' should specify an explicit 'targets.%s.git' configuration", env)
@@ -134,12 +149,12 @@ func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUs
 	r := b.Config.Resources
 	for i := range r.Pipelines {
 		if r.Pipelines[i].Development {
-			return fmt.Errorf("target with 'mode: production' cannot include a pipeline with 'development: true'")
+			return diag.Errorf("target with 'mode: production' cannot include a pipeline with 'development: true'")
 		}
 	}
 
 	if !isPrincipalUsed && !isRunAsSet(r) {
-		return fmt.Errorf("'run_as' must be set for all jobs when using 'mode: production'")
+		return diag.Errorf("'run_as' must be set for all jobs when using 'mode: production'")
 	}
 	return nil
 }
@@ -156,21 +171,21 @@ func isRunAsSet(r config.Resources) bool {
 	return true
 }
 
-func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	switch b.Config.Bundle.Mode {
 	case config.Development:
-		err := validateDevelopmentMode(b)
-		if err != nil {
-			return err
+		diags := validateDevelopmentMode(b)
+		if diags != nil {
+			return diags
 		}
-		return transformDevelopmentMode(b)
+		return transformDevelopmentMode(ctx, b)
 	case config.Production:
 		isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.UserName)
 		return validateProductionMode(ctx, b, isPrincipal)
 	case "":
 		// No action
 	default:
-		return fmt.Errorf("unsupported value '%s' specified for 'mode': must be either 'development' or 'production'", b.Config.Bundle.Mode)
+		return diag.Errorf("unsupported value '%s' specified for 'mode': must be either 'development' or 'production'", b.Config.Bundle.Mode)
 	}
 
 	return nil
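
The deployment-lock logic relies on `Enabled` being a `*bool`, which distinguishes "unset" from an explicit `true` or `false`: development mode only disables the lock when the user has not explicitly enabled it. A sketch of that tri-state pattern, with `Lock` and its methods as simplified stand-ins for the config types:

```go
package main

import "fmt"

// Lock is a simplified stand-in for the deployment lock configuration.
// The *bool gives three states: unset (nil), explicitly true, explicitly false.
type Lock struct {
	Enabled *bool
}

// IsEnabled treats the unset state as enabled (the safe default).
func (l Lock) IsEnabled() bool {
	return l.Enabled == nil || *l.Enabled
}

// IsExplicitlyEnabled is only true when the user set enabled: true.
func (l Lock) IsExplicitlyEnabled() bool {
	return l.Enabled != nil && *l.Enabled
}

func main() {
	var lock Lock
	if !lock.IsExplicitlyEnabled() {
		disabled := false
		lock.Enabled = &disabled // development mode turns the lock off
	}
	fmt.Println(lock.IsEnabled()) // false
}
```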
@@ -97,6 +97,23 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
 				RegisteredModels: map[string]*resources.RegisteredModel{
 					"registeredmodel1": {CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{Name: "registeredmodel1"}},
 				},
+				QualityMonitors: map[string]*resources.QualityMonitor{
+					"qualityMonitor1": {CreateMonitor: &catalog.CreateMonitor{TableName: "qualityMonitor1"}},
+					"qualityMonitor2": {
+						CreateMonitor: &catalog.CreateMonitor{
+							TableName: "qualityMonitor2",
+							Schedule:  &catalog.MonitorCronSchedule{},
+						},
+					},
+					"qualityMonitor3": {
+						CreateMonitor: &catalog.CreateMonitor{
+							TableName: "qualityMonitor3",
+							Schedule: &catalog.MonitorCronSchedule{
+								PauseStatus: catalog.MonitorCronSchedulePauseStatusUnpaused,
+							},
+						},
+					},
+				},
 			},
 		},
 		// Use AWS implementation for testing.
@@ -110,8 +127,8 @@ func TestProcessTargetModeDevelopment(t *testing.T) {
 	b := mockBundle(config.Development)
 
 	m := ProcessTargetMode()
-	err := bundle.Apply(context.Background(), b, m)
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, m)
+	require.NoError(t, diags.Error())
 
 	// Job 1
 	assert.Equal(t, "[dev lennart] job1", b.Config.Resources.Jobs["job1"].Name)
@@ -138,12 +155,18 @@ func TestProcessTargetModeDevelopment(t *testing.T) {
 
 	// Model 1
 	assert.Equal(t, "[dev lennart] model1", b.Config.Resources.Models["model1"].Name)
+	assert.Contains(t, b.Config.Resources.Models["model1"].Tags, ml.ModelTag{Key: "dev", Value: "lennart"})
 
 	// Model serving endpoint 1
 	assert.Equal(t, "dev_lennart_servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
 
 	// Registered model 1
 	assert.Equal(t, "dev_lennart_registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
+
+	// Quality Monitor 1
+	assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
+	assert.Nil(t, b.Config.Resources.QualityMonitors["qualityMonitor2"].Schedule)
+	assert.Equal(t, catalog.MonitorCronSchedulePauseStatusUnpaused, b.Config.Resources.QualityMonitors["qualityMonitor3"].Schedule.PauseStatus)
 }
 
 func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) {
@@ -153,8 +176,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) {
 	})
 
 	b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
-	err := bundle.Apply(context.Background(), b, ProcessTargetMode())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, ProcessTargetMode())
+	require.NoError(t, diags.Error())
 
 	// Assert that tag normalization took place.
 	assert.Equal(t, "Hello world__", b.Config.Resources.Jobs["job1"].Tags["dev"])
@@ -167,8 +190,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForAzure(t *testing.T) {
 	})
 
 	b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
-	err := bundle.Apply(context.Background(), b, ProcessTargetMode())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, ProcessTargetMode())
+	require.NoError(t, diags.Error())
 
 	// Assert that tag normalization took place (Azure allows more characters than AWS).
 	assert.Equal(t, "Héllö wörld?!", b.Config.Resources.Jobs["job1"].Tags["dev"])
@@ -181,8 +204,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForGcp(t *testing.T) {
 	})
 
 	b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
-	err := bundle.Apply(context.Background(), b, ProcessTargetMode())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, ProcessTargetMode())
+	require.NoError(t, diags.Error())
 
 	// Assert that tag normalization took place.
 	assert.Equal(t, "Hello_world", b.Config.Resources.Jobs["job1"].Tags["dev"])
@@ -192,27 +215,28 @@ func TestProcessTargetModeDefault(t *testing.T) {
 	b := mockBundle("")
 
 	m := ProcessTargetMode()
-	err := bundle.Apply(context.Background(), b, m)
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, m)
+	require.NoError(t, diags.Error())
 	assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name)
 	assert.Equal(t, "pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name)
 	assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
 	assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
 	assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
+	assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
 }
 
 func TestProcessTargetModeProduction(t *testing.T) {
 	b := mockBundle(config.Production)
 
-	err := validateProductionMode(context.Background(), b, false)
-	require.ErrorContains(t, err, "run_as")
+	diags := validateProductionMode(context.Background(), b, false)
+	require.ErrorContains(t, diags.Error(), "run_as")
 
 	b.Config.Workspace.StatePath = "/Shared/.bundle/x/y/state"
 	b.Config.Workspace.ArtifactPath = "/Shared/.bundle/x/y/artifacts"
 	b.Config.Workspace.FilePath = "/Shared/.bundle/x/y/files"
 
-	err = validateProductionMode(context.Background(), b, false)
-	require.ErrorContains(t, err, "production")
+	diags = validateProductionMode(context.Background(), b, false)
+	require.ErrorContains(t, diags.Error(), "production")
 
 	permissions := []resources.Permission{
 		{
@@ -231,26 +255,27 @@ func TestProcessTargetModeProduction(t *testing.T) {
 	b.Config.Resources.Models["model1"].Permissions = permissions
 	b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Permissions = permissions
 
-	err = validateProductionMode(context.Background(), b, false)
-	require.NoError(t, err)
+	diags = validateProductionMode(context.Background(), b, false)
+	require.NoError(t, diags.Error())
 
 	assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name)
 	assert.Equal(t, "pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name)
 	assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
 	assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
 	assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
+	assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
 }
 
 func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) {
 	b := mockBundle(config.Production)
 
 	// Our target has all kinds of problems when not using service principals ...
-	err := validateProductionMode(context.Background(), b, false)
-	require.Error(t, err)
+	diags := validateProductionMode(context.Background(), b, false)
+	require.Error(t, diags.Error())
 
 	// ... but we're much less strict when a principal is used
-	err = validateProductionMode(context.Background(), b, true)
-	require.NoError(t, err)
+	diags = validateProductionMode(context.Background(), b, true)
+	require.NoError(t, diags.Error())
 }
 
 // Make sure that we have test coverage for all resource types
@@ -274,12 +299,12 @@ func TestAllResourcesMocked(t *testing.T) {
 // Make sure that we at least rename all resources
 func TestAllResourcesRenamed(t *testing.T) {
 	b := mockBundle(config.Development)
-	resources := reflect.ValueOf(b.Config.Resources)
 
 	m := ProcessTargetMode()
-	err := bundle.Apply(context.Background(), b, m)
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, m)
+	require.NoError(t, diags.Error())
 
+	resources := reflect.ValueOf(b.Config.Resources)
 	for i := 0; i < resources.NumField(); i++ {
 		field := resources.Field(i)
 
@@ -300,3 +325,23 @@ func TestAllResourcesRenamed(t *testing.T) {
 		}
 	}
 }
+
+func TestDisableLocking(t *testing.T) {
+	ctx := context.Background()
+	b := mockBundle(config.Development)
+
+	err := bundle.Apply(ctx, b, ProcessTargetMode())
+	require.Nil(t, err)
+	assert.False(t, b.Config.Bundle.Deployment.Lock.IsEnabled())
+}
+
+func TestDisableLockingDisabled(t *testing.T) {
+	ctx := context.Background()
+	b := mockBundle(config.Development)
+	explicitlyEnabled := true
+	b.Config.Bundle.Deployment.Lock.Enabled = &explicitlyEnabled
+
+	err := bundle.Apply(ctx, b, ProcessTargetMode())
+	require.Nil(t, err)
+	assert.True(t, b.Config.Bundle.Deployment.Lock.IsEnabled(), "Deployment lock should remain enabled in development mode when explicitly enabled")
+}
@@ -0,0 +1,42 @@
+package python
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"io"
+
+	"github.com/databricks/cli/libs/log"
+)
+
+type logWriter struct {
+	ctx    context.Context
+	prefix string
+	buf    bytes.Buffer
+}
+
+// newLogWriter creates a new io.Writer that writes to log with specified prefix.
+func newLogWriter(ctx context.Context, prefix string) io.Writer {
+	return &logWriter{
+		ctx:    ctx,
+		prefix: prefix,
+	}
+}
+
+func (p *logWriter) Write(bytes []byte) (n int, err error) {
+	p.buf.Write(bytes)
+
+	scanner := bufio.NewScanner(&p.buf)
+
+	for scanner.Scan() {
+		line := scanner.Text()
+
+		log.Debugf(p.ctx, "%s%s", p.prefix, line)
+	}
+
+	remaining := p.buf.Bytes()
+	p.buf.Reset()
+	p.buf.Write(remaining)
+
+	return len(bytes), nil
+}
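
For comparison, here is a self-contained, strictly line-buffered variant of the `logWriter` pattern above. One caveat with the original approach: `bufio.Scanner` also returns a final token that is not newline-terminated, so a writer that must hold back partial lines until they are completed by a later `Write` has to split on `'\n'` manually, as this sketch does:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// lineWriter buffers bytes until a full line is available, emits each
// complete line with a prefix, and keeps any trailing partial line for the
// next Write call.
type lineWriter struct {
	prefix string
	buf    bytes.Buffer
}

func (w *lineWriter) Write(p []byte) (int, error) {
	w.buf.Write(p)
	for {
		data := w.buf.Bytes()
		i := bytes.IndexByte(data, '\n')
		if i < 0 {
			break // no complete line yet; keep the remainder buffered
		}
		fmt.Printf("%s%s\n", w.prefix, data[:i])
		w.buf.Next(i + 1) // discard the emitted line and its newline
	}
	return len(p), nil
}

func main() {
	var w io.Writer = &lineWriter{prefix: "python: "}
	io.WriteString(w, "first line\nsec")
	io.WriteString(w, "ond line\n")
	// Output:
	// python: first line
	// python: second line
}
```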
@@ -0,0 +1,97 @@
+package python
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+)
+
+type pythonDiagnostic struct {
+	Severity pythonSeverity           `json:"severity"`
+	Summary  string                   `json:"summary"`
+	Detail   string                   `json:"detail,omitempty"`
+	Location pythonDiagnosticLocation `json:"location,omitempty"`
+	Path     string                   `json:"path,omitempty"`
+}
+
+type pythonDiagnosticLocation struct {
+	File   string `json:"file"`
+	Line   int    `json:"line"`
+	Column int    `json:"column"`
+}
+
+type pythonSeverity = string
+
+const (
+	pythonError   pythonSeverity = "error"
+	pythonWarning pythonSeverity = "warning"
+)
+
+// parsePythonDiagnostics parses diagnostics from the Python mutator.
+//
+// diagnostics file is newline-separated JSON objects with pythonDiagnostic structure.
+func parsePythonDiagnostics(input io.Reader) (diag.Diagnostics, error) {
+	diags := diag.Diagnostics{}
+	decoder := json.NewDecoder(input)
+
+	for decoder.More() {
+		var parsedLine pythonDiagnostic
+
+		err := decoder.Decode(&parsedLine)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse diags: %s", err)
+		}
+
+		severity, err := convertPythonSeverity(parsedLine.Severity)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse severity: %s", err)
+		}
+
+		path, err := convertPythonPath(parsedLine.Path)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse path: %s", err)
+		}
+
+		diag := diag.Diagnostic{
+			Severity: severity,
+			Summary:  parsedLine.Summary,
+			Detail:   parsedLine.Detail,
+			Location: convertPythonLocation(parsedLine.Location),
+			Path:     path,
+		}
+
+		diags = diags.Append(diag)
+	}
+
+	return diags, nil
+}
+
+func convertPythonPath(path string) (dyn.Path, error) {
+	if path == "" {
+		return nil, nil
+	}
+
+	return dyn.NewPathFromString(path)
+}
+
+func convertPythonSeverity(severity pythonSeverity) (diag.Severity, error) {
+	switch severity {
+	case pythonError:
+		return diag.Error, nil
+	case pythonWarning:
+		return diag.Warning, nil
+	default:
+		return 0, fmt.Errorf("unexpected value: %s", severity)
+	}
+}
+
+func convertPythonLocation(location pythonDiagnosticLocation) dyn.Location {
+	return dyn.Location{
+		File:   location.File,
+		Line:   location.Line,
+		Column: location.Column,
+	}
+}
@@ -0,0 +1,107 @@
+package python
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+	assert "github.com/databricks/cli/libs/dyn/dynassert"
+)
+
+func TestConvertPythonLocation(t *testing.T) {
+	location := convertPythonLocation(pythonDiagnosticLocation{
+		File:   "src/examples/file.py",
+		Line:   1,
+		Column: 2,
+	})
+
+	assert.Equal(t, dyn.Location{
+		File:   "src/examples/file.py",
+		Line:   1,
+		Column: 2,
+	}, location)
+}
+
+type parsePythonDiagnosticsTest struct {
+	name     string
+	input    string
+	expected diag.Diagnostics
+}
+
+func TestParsePythonDiagnostics(t *testing.T) {
+	testCases := []parsePythonDiagnosticsTest{
+		{
+			name:  "short error with location",
+			input: `{"severity": "error", "summary": "error summary", "location": {"file": "src/examples/file.py", "line": 1, "column": 2}}`,
+			expected: diag.Diagnostics{
+				{
+					Severity: diag.Error,
+					Summary:  "error summary",
+					Location: dyn.Location{
+						File:   "src/examples/file.py",
+						Line:   1,
+						Column: 2,
+					},
+				},
+			},
+		},
+		{
+			name:  "short error with path",
+			input: `{"severity": "error", "summary": "error summary", "path": "resources.jobs.job0.name"}`,
+			expected: diag.Diagnostics{
+				{
+					Severity: diag.Error,
+					Summary:  "error summary",
+					Path:     dyn.MustPathFromString("resources.jobs.job0.name"),
+				},
+			},
+		},
+		{
+			name:     "empty file",
+			input:    "",
+			expected: diag.Diagnostics{},
+		},
+		{
+			name:     "newline file",
+			input:    "\n",
+			expected: diag.Diagnostics{},
+		},
+		{
+			name:  "warning with detail",
+			input: `{"severity": "warning", "summary": "warning summary", "detail": "warning detail"}`,
+			expected: diag.Diagnostics{
+				{
+					Severity: diag.Warning,
+					Summary:  "warning summary",
+					Detail:   "warning detail",
+				},
+			},
+		},
+		{
+			name: "multiple errors",
+			input: `{"severity": "error", "summary": "error summary (1)"}` + "\n" +
+				`{"severity": "error", "summary": "error summary (2)"}`,
+			expected: diag.Diagnostics{
+				{
+					Severity: diag.Error,
+					Summary:  "error summary (1)",
+				},
+				{
+					Severity: diag.Error,
+					Summary:  "error summary (2)",
+				},
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			diags, err := parsePythonDiagnostics(bytes.NewReader([]byte(tc.input)))
+
+			assert.NoError(t, err)
+			assert.Equal(t, tc.expected, diags)
+		})
+	}
+}
@@ -0,0 +1,433 @@
package python

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"runtime"

	"github.com/databricks/databricks-sdk-go/logger"

	"github.com/databricks/cli/bundle/env"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/convert"
	"github.com/databricks/cli/libs/dyn/merge"
	"github.com/databricks/cli/libs/dyn/yamlloader"
	"github.com/databricks/cli/libs/log"
	"github.com/databricks/cli/libs/process"
)

type phase string

const (
	// PythonMutatorPhaseLoad is the phase in which bundle configuration is loaded.
	//
	// At this stage, PyDABs adds statically defined resources to the bundle configuration.
	// Which resources are added should be deterministic and not depend on the bundle configuration.
	//
	// We also leave open the possibility of appending other sections of bundle configuration,
	// for example, adding new variables. However, this is not supported yet, and the CLI rejects
	// such changes.
	PythonMutatorPhaseLoad phase = "load"

	// PythonMutatorPhaseInit is the phase after bundle configuration was loaded, and
	// the list of statically declared resources is known.
	//
	// At this stage, PyDABs adds resources defined using generators, or mutates existing resources,
	// including the ones defined using YAML.
	//
	// During this process, within generators and mutators, PyDABs can access:
	// - the selected deployment target
	// - bundle variable values
	// - variables provided through CLI arguments or environment variables
	//
	// The following is not available:
	// - variables referencing other variables (they are still in unresolved form)
	//
	// PyDABs can output YAML containing references to variables, and the CLI resolves them.
	//
	// Existing resources can't be removed, and the CLI rejects such changes.
	PythonMutatorPhaseInit phase = "init"
)

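Taken together, the two constants describe a two-pass protocol: the mutator runs once at load time and again after targets and variable values are known. A minimal sketch of how such a sequence could be wired up (illustrative only; the actual phase ordering lives elsewhere in the CLI, but bundle.Seq and bundle.Apply are the real sequencing primitives used by this package's tests):

	// Hypothetical wiring, for illustration.
	mutators := bundle.Seq(
		PythonMutator(PythonMutatorPhaseLoad),
		// ...mutators that select the target and set variable values...
		PythonMutator(PythonMutatorPhaseInit),
	)
	diags := bundle.Apply(ctx, b, mutators)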
type pythonMutator struct {
	phase phase
}

func PythonMutator(phase phase) bundle.Mutator {
	return &pythonMutator{
		phase: phase,
	}
}

func (m *pythonMutator) Name() string {
	return fmt.Sprintf("PythonMutator(%s)", m.phase)
}

func getExperimental(b *bundle.Bundle) config.Experimental {
	if b.Config.Experimental == nil {
		return config.Experimental{}
	}

	return *b.Config.Experimental
}

func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	experimental := getExperimental(b)

	if !experimental.PyDABs.Enabled {
		return nil
	}

	if experimental.PyDABs.VEnvPath == "" {
		return diag.Errorf("\"experimental.pydabs.enabled\" can only be used when \"experimental.pydabs.venv_path\" is set")
	}

	// mutateDiags is needed because Mutate returns 'error' instead of 'diag.Diagnostics'.
	var mutateDiags diag.Diagnostics
	var mutateDiagsHasError = errors.New("unexpected error")

	err := b.Config.Mutate(func(leftRoot dyn.Value) (dyn.Value, error) {
		pythonPath := interpreterPath(experimental.PyDABs.VEnvPath)

		if _, err := os.Stat(pythonPath); err != nil {
			if os.IsNotExist(err) {
				return dyn.InvalidValue, fmt.Errorf("can't find %q, check if venv is created", pythonPath)
			} else {
				return dyn.InvalidValue, fmt.Errorf("can't find %q: %w", pythonPath, err)
			}
		}

		cacheDir, err := createCacheDir(ctx)
		if err != nil {
			return dyn.InvalidValue, fmt.Errorf("failed to create cache dir: %w", err)
		}

		rightRoot, diags := m.runPythonMutator(ctx, cacheDir, b.RootPath, pythonPath, leftRoot)
		mutateDiags = diags
		if diags.HasError() {
			return dyn.InvalidValue, mutateDiagsHasError
		}

		visitor, err := createOverrideVisitor(ctx, m.phase)
		if err != nil {
			return dyn.InvalidValue, err
		}

		return merge.Override(leftRoot, rightRoot, visitor)
	})

	if err == mutateDiagsHasError {
		if !mutateDiags.HasError() {
			panic("mutateDiags has no error, but error is expected")
		}

		return mutateDiags
	}

	return mutateDiags.Extend(diag.FromErr(err))
}

func createCacheDir(ctx context.Context) (string, error) {
	// b.CacheDir doesn't work because the target isn't selected yet.

	// Support the same env variable as in b.CacheDir.
	if tempDir, exists := env.TempDir(ctx); exists {
		// Use 'default' as the target name.
		cacheDir := filepath.Join(tempDir, "default", "pydabs")

		err := os.MkdirAll(cacheDir, 0700)
		if err != nil {
			return "", err
		}

		return cacheDir, nil
	}

	return os.MkdirTemp("", "-pydabs")
}

func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, rootPath string, pythonPath string, root dyn.Value) (dyn.Value, diag.Diagnostics) {
	inputPath := filepath.Join(cacheDir, "input.json")
	outputPath := filepath.Join(cacheDir, "output.json")
	diagnosticsPath := filepath.Join(cacheDir, "diagnostics.json")

	args := []string{
		pythonPath,
		"-m",
		"databricks.bundles.build",
		"--phase",
		string(m.phase),
		"--input",
		inputPath,
		"--output",
		outputPath,
		"--diagnostics",
		diagnosticsPath,
	}

	if err := writeInputFile(inputPath, root); err != nil {
		return dyn.InvalidValue, diag.Errorf("failed to write input file: %s", err)
	}

	stderrWriter := newLogWriter(ctx, "stderr: ")
	stdoutWriter := newLogWriter(ctx, "stdout: ")

	_, processErr := process.Background(
		ctx,
		args,
		process.WithDir(rootPath),
		process.WithStderrWriter(stderrWriter),
		process.WithStdoutWriter(stdoutWriter),
	)
	if processErr != nil {
		logger.Debugf(ctx, "python mutator process failed: %s", processErr)
	}

	pythonDiagnostics, pythonDiagnosticsErr := loadDiagnosticsFile(diagnosticsPath)
	if pythonDiagnosticsErr != nil {
		logger.Debugf(ctx, "failed to load diagnostics: %s", pythonDiagnosticsErr)
	}

	// If the diagnostics file exists, it gives the most descriptive errors.
	// If it contains any error, we treat it as fatal and stop processing.
	if pythonDiagnostics.HasError() {
		return dyn.InvalidValue, pythonDiagnostics
	}

	// The process can fail without reporting errors in the diagnostics file, or without
	// creating it at all; for instance, when the venv doesn't have the PyDABs library installed.
	if processErr != nil {
		return dyn.InvalidValue, diag.Errorf("python mutator process failed: %s, use --debug to enable logging", processErr)
	}

	// We can also fail to read the diagnostics file, which should always be created.
	if pythonDiagnosticsErr != nil {
		return dyn.InvalidValue, diag.Errorf("failed to load diagnostics: %s", pythonDiagnosticsErr)
	}

	output, err := loadOutputFile(rootPath, outputPath)
	if err != nil {
		return dyn.InvalidValue, diag.Errorf("failed to load Python mutator output: %s", err)
	}

	// We pass pythonDiagnostics through because it can contain warnings.
	return output, pythonDiagnostics
}

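In other words, the contract with the PyDABs process is a plain command-line invocation over three JSON files in the cache directory. Spelled out for the load phase (paths are illustrative; the real cache directory is resolved at runtime by createCacheDir):

	.venv/bin/python3 -m databricks.bundles.build \
	    --phase load \
	    --input <cacheDir>/input.json \
	    --output <cacheDir>/output.json \
	    --diagnostics <cacheDir>/diagnostics.json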
func writeInputFile(inputPath string, input dyn.Value) error {
	// We need to marshal dyn.Value instead of bundle.Config to JSON to support
	// non-string fields assigned with bundle variables.
	rootConfigJson, err := json.Marshal(input.AsAny())
	if err != nil {
		return fmt.Errorf("failed to marshal input: %w", err)
	}

	return os.WriteFile(inputPath, rootConfigJson, 0600)
}

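The comment above is worth unpacking: in the typed bundle.Config, a numeric field cannot hold an unresolved variable reference, while the dynamic dyn.Value keeps the reference as a string until resolution. A sketch of the kind of input that only survives the JSON round-trip via dyn.Value (illustrative configuration; the num_workers field is an assumption chosen because it is a non-string field):

	resources:
	  jobs:
	    job0:
	      job_clusters:
	        - new_cluster:
	            num_workers: ${var.workers}  # integer field holding an unresolved reference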
func loadOutputFile(rootPath string, outputPath string) (dyn.Value, error) {
	outputFile, err := os.Open(outputPath)
	if err != nil {
		return dyn.InvalidValue, fmt.Errorf("failed to open output file: %w", err)
	}

	defer outputFile.Close()

	// We need an absolute path because later parts of the pipeline assume all paths are absolute,
	// and this file is used as the location against which relative paths are resolved.
	//
	// virtualPath has to stay within rootPath, because locations outside the root path are not allowed:
	//
	//   Error: path /var/folders/.../pydabs/dist/*.whl is not contained in bundle root path
	//
	// For that reason, we pass virtualPath instead of outputPath as the file location.
	virtualPath, err := filepath.Abs(filepath.Join(rootPath, "__generated_by_pydabs__.yml"))
	if err != nil {
		return dyn.InvalidValue, fmt.Errorf("failed to get absolute path: %w", err)
	}

	generated, err := yamlloader.LoadYAML(virtualPath, outputFile)
	if err != nil {
		return dyn.InvalidValue, fmt.Errorf("failed to parse output file: %w", err)
	}

	normalized, diagnostic := convert.Normalize(config.Root{}, generated)
	if diagnostic.Error() != nil {
		return dyn.InvalidValue, fmt.Errorf("failed to normalize output: %w", diagnostic.Error())
	}

	// Warnings shouldn't happen because the output should already be normalized;
	// when they do, it's a bug in the mutator and should be treated as an error.
	for _, d := range diagnostic.Filter(diag.Warning) {
		return dyn.InvalidValue, fmt.Errorf("failed to normalize output: %s", d.Summary)
	}

	return normalized, nil
}

// loadDiagnosticsFile loads diagnostics from a file.
//
// The file contains a list of warnings and errors that we should print to users.
//
// If the file doesn't exist, we return an error. We expect the file to always be
// created by the Python mutator; its absence means there are integration problems
// and the diagnostics file was lost. If we treated non-existence as empty
// diag.Diagnostics, we would risk losing errors and warnings.
func loadDiagnosticsFile(path string) (diag.Diagnostics, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("failed to open diagnostics file: %w", err)
	}

	defer file.Close()

	return parsePythonDiagnostics(file)
}

func createOverrideVisitor(ctx context.Context, phase phase) (merge.OverrideVisitor, error) {
	switch phase {
	case PythonMutatorPhaseLoad:
		return createLoadOverrideVisitor(ctx), nil
	case PythonMutatorPhaseInit:
		return createInitOverrideVisitor(ctx), nil
	default:
		return merge.OverrideVisitor{}, fmt.Errorf("unknown phase: %s", phase)
	}
}

// createLoadOverrideVisitor creates an override visitor for the load phase.
//
// During load, it's only possible to create new resources; modifying or
// deleting existing ones is not allowed.
func createLoadOverrideVisitor(ctx context.Context) merge.OverrideVisitor {
	resourcesPath := dyn.NewPath(dyn.Key("resources"))
	jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs"))

	return merge.OverrideVisitor{
		VisitDelete: func(valuePath dyn.Path, left dyn.Value) error {
			if isOmitemptyDelete(left) {
				return merge.ErrOverrideUndoDelete
			}

			return fmt.Errorf("unexpected change at %q (delete)", valuePath.String())
		},
		VisitInsert: func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) {
			// Insert 'resources' or 'resources.jobs' if it didn't exist before.
			if valuePath.Equal(resourcesPath) || valuePath.Equal(jobsPath) {
				return right, nil
			}

			if !valuePath.HasPrefix(jobsPath) {
				return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String())
			}

			insertResource := len(valuePath) == len(jobsPath)+1

			// Adding a property to an existing resource is not allowed, because it changes the resource.
			if !insertResource {
				return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String())
			}

			log.Debugf(ctx, "Insert value at %q", valuePath.String())

			return right, nil
		},
		VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) {
			return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (update)", valuePath.String())
		},
	}
}

// createInitOverrideVisitor creates an override visitor for the init phase.
//
// During the init phase it's possible to create new resources and to modify
// existing ones, but not to delete existing resources.
func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor {
	resourcesPath := dyn.NewPath(dyn.Key("resources"))
	jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs"))

	return merge.OverrideVisitor{
		VisitDelete: func(valuePath dyn.Path, left dyn.Value) error {
			if isOmitemptyDelete(left) {
				return merge.ErrOverrideUndoDelete
			}

			if !valuePath.HasPrefix(jobsPath) {
				return fmt.Errorf("unexpected change at %q (delete)", valuePath.String())
			}

			deleteResource := len(valuePath) == len(jobsPath)+1

			if deleteResource {
				return fmt.Errorf("unexpected change at %q (delete)", valuePath.String())
			}

			// Deleting properties is allowed because it only changes an existing resource.
			log.Debugf(ctx, "Delete value at %q", valuePath.String())

			return nil
		},
		VisitInsert: func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) {
			// Insert 'resources' or 'resources.jobs' if it didn't exist before.
			if valuePath.Equal(resourcesPath) || valuePath.Equal(jobsPath) {
				return right, nil
			}

			if !valuePath.HasPrefix(jobsPath) {
				return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String())
			}

			log.Debugf(ctx, "Insert value at %q", valuePath.String())

			return right, nil
		},
		VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) {
			if !valuePath.HasPrefix(jobsPath) {
				return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (update)", valuePath.String())
			}

			log.Debugf(ctx, "Update value at %q", valuePath.String())

			return right, nil
		},
	}
}

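Read side by side, the two visitors enforce the following matrix of permitted changes under 'resources.jobs' (summarized from the code above; anything outside that prefix is rejected in both phases):

	operation                          load    init
	insert a new job                   yes     yes
	update a property of a job         no      yes
	delete a property of a job         no      yes
	delete a whole job                 no      no
	omitempty delete (empty/nil left)  no-op   no-op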
func isOmitemptyDelete(left dyn.Value) bool {
	// PyDABs can omit empty sequences/mappings in its output because we don't track them
	// as optional: there is no semantic difference between empty and missing, so we keep
	// such values as they were before PyDABs deleted them.

	switch left.Kind() {
	case dyn.KindMap:
		return left.MustMap().Len() == 0

	case dyn.KindSequence:
		return len(left.MustSequence()) == 0

	case dyn.KindNil:
		// A map/sequence can be nil, for instance, in bad YAML like: `foo:<eof>`
		return true

	default:
		return false
	}
}

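To make the omitempty case concrete: given input like the sketch below, PyDABs may echo the job back without the empty `tags` mapping, and the visitor undoes that apparent deletion instead of treating it as a change (illustrative configuration):

	resources:
	  jobs:
	    job0:
	      name: job_0
	      tags: {}   # empty mapping; PyDABs may omit it from its output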
// interpreterPath returns the platform-specific path to the Python interpreter
// in the virtual environment.
func interpreterPath(venvPath string) string {
	if runtime.GOOS == "windows" {
		return filepath.Join(venvPath, "Scripts", "python3.exe")
	} else {
		return filepath.Join(venvPath, "bin", "python3")
	}
}
@@ -0,0 +1,623 @@
package python

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"reflect"
	"runtime"
	"testing"

	"github.com/databricks/cli/libs/dyn/merge"

	"github.com/databricks/cli/bundle/env"
	"github.com/stretchr/testify/require"

	"golang.org/x/exp/maps"

	"github.com/databricks/cli/libs/dyn"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	assert "github.com/databricks/cli/libs/dyn/dynassert"
	"github.com/databricks/cli/libs/process"
)

func TestPythonMutator_Name_load(t *testing.T) {
	mutator := PythonMutator(PythonMutatorPhaseLoad)

	assert.Equal(t, "PythonMutator(load)", mutator.Name())
}

func TestPythonMutator_Name_init(t *testing.T) {
	mutator := PythonMutator(PythonMutatorPhaseInit)

	assert.Equal(t, "PythonMutator(init)", mutator.Name())
}

func TestPythonMutator_load(t *testing.T) {
	withFakeVEnv(t, ".venv")

	b := loadYaml("databricks.yml", `
      experimental:
        pydabs:
          enabled: true
          venv_path: .venv
      resources:
        jobs:
          job0:
            name: job_0`)

	ctx := withProcessStub(
		t,
		[]string{
			interpreterPath(".venv"),
			"-m",
			"databricks.bundles.build",
			"--phase",
			"load",
		},
		`{
			"experimental": {
				"pydabs": {
					"enabled": true,
					"venv_path": ".venv"
				}
			},
			"resources": {
				"jobs": {
					"job0": {
						name: "job_0"
					},
					"job1": {
						name: "job_1"
					},
				}
			}
		}`,
		`{"severity": "warning", "summary": "job doesn't have any tasks", "location": {"file": "src/examples/file.py", "line": 10, "column": 5}}`,
	)

	mutator := PythonMutator(PythonMutatorPhaseLoad)
	diags := bundle.Apply(ctx, b, mutator)

	assert.NoError(t, diags.Error())

	assert.ElementsMatch(t, []string{"job0", "job1"}, maps.Keys(b.Config.Resources.Jobs))

	if job0, ok := b.Config.Resources.Jobs["job0"]; ok {
		assert.Equal(t, "job_0", job0.Name)
	}

	if job1, ok := b.Config.Resources.Jobs["job1"]; ok {
		assert.Equal(t, "job_1", job1.Name)
	}

	assert.Equal(t, 1, len(diags))
	assert.Equal(t, "job doesn't have any tasks", diags[0].Summary)
	assert.Equal(t, dyn.Location{
		File:   "src/examples/file.py",
		Line:   10,
		Column: 5,
	}, diags[0].Location)
}

func TestPythonMutator_load_disallowed(t *testing.T) {
	withFakeVEnv(t, ".venv")

	b := loadYaml("databricks.yml", `
      experimental:
        pydabs:
          enabled: true
          venv_path: .venv
      resources:
        jobs:
          job0:
            name: job_0`)

	ctx := withProcessStub(
		t,
		[]string{
			interpreterPath(".venv"),
			"-m",
			"databricks.bundles.build",
			"--phase",
			"load",
		},
		`{
			"experimental": {
				"pydabs": {
					"enabled": true,
					"venv_path": ".venv"
				}
			},
			"resources": {
				"jobs": {
					"job0": {
						name: "job_0",
						description: "job description"
					}
				}
			}
		}`, "")

	mutator := PythonMutator(PythonMutatorPhaseLoad)
	diag := bundle.Apply(ctx, b, mutator)

	assert.EqualError(t, diag.Error(), "unexpected change at \"resources.jobs.job0.description\" (insert)")
}

func TestPythonMutator_init(t *testing.T) {
	withFakeVEnv(t, ".venv")

	b := loadYaml("databricks.yml", `
      experimental:
        pydabs:
          enabled: true
          venv_path: .venv
      resources:
        jobs:
          job0:
            name: job_0`)

	ctx := withProcessStub(
		t,
		[]string{
			interpreterPath(".venv"),
			"-m",
			"databricks.bundles.build",
			"--phase",
			"init",
		},
		`{
			"experimental": {
				"pydabs": {
					"enabled": true,
					"venv_path": ".venv"
				}
			},
			"resources": {
				"jobs": {
					"job0": {
						name: "job_0",
						description: "my job"
					}
				}
			}
		}`, "")

	mutator := PythonMutator(PythonMutatorPhaseInit)
	diag := bundle.Apply(ctx, b, mutator)

	assert.NoError(t, diag.Error())

	assert.ElementsMatch(t, []string{"job0"}, maps.Keys(b.Config.Resources.Jobs))
	assert.Equal(t, "job_0", b.Config.Resources.Jobs["job0"].Name)
	assert.Equal(t, "my job", b.Config.Resources.Jobs["job0"].Description)

	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
		// 'name' wasn't changed, so it keeps its location.
		name, err := dyn.GetByPath(v, dyn.MustPathFromString("resources.jobs.job0.name"))
		require.NoError(t, err)
		assert.Equal(t, "databricks.yml", name.Location().File)

		// 'description' was updated by PyDABs and has the location of the generated file
		// until we implement source maps.
		description, err := dyn.GetByPath(v, dyn.MustPathFromString("resources.jobs.job0.description"))
		require.NoError(t, err)

		expectedVirtualPath, err := filepath.Abs("__generated_by_pydabs__.yml")
		require.NoError(t, err)
		assert.Equal(t, expectedVirtualPath, description.Location().File)

		return v, nil
	})
	assert.NoError(t, err)
}

func TestPythonMutator_badOutput(t *testing.T) {
	withFakeVEnv(t, ".venv")

	b := loadYaml("databricks.yml", `
      experimental:
        pydabs:
          enabled: true
          venv_path: .venv
      resources:
        jobs:
          job0:
            name: job_0`)

	ctx := withProcessStub(
		t,
		[]string{
			interpreterPath(".venv"),
			"-m",
			"databricks.bundles.build",
			"--phase",
			"load",
		},
		`{
			"resources": {
				"jobs": {
					"job0": {
						unknown_property: "my job"
					}
				}
			}
		}`, "")

	mutator := PythonMutator(PythonMutatorPhaseLoad)
	diag := bundle.Apply(ctx, b, mutator)

	assert.EqualError(t, diag.Error(), "failed to load Python mutator output: failed to normalize output: unknown field: unknown_property")
}

func TestPythonMutator_disabled(t *testing.T) {
	b := loadYaml("databricks.yml", ``)

	ctx := context.Background()
	mutator := PythonMutator(PythonMutatorPhaseLoad)
	diag := bundle.Apply(ctx, b, mutator)

	assert.NoError(t, diag.Error())
}

func TestPythonMutator_venvRequired(t *testing.T) {
	b := loadYaml("databricks.yml", `
      experimental:
        pydabs:
          enabled: true`)

	ctx := context.Background()
	mutator := PythonMutator(PythonMutatorPhaseLoad)
	diag := bundle.Apply(ctx, b, mutator)

	assert.Error(t, diag.Error(), "\"experimental.enable_pydabs\" is enabled, but \"experimental.venv.path\" is not set")
}

func TestPythonMutator_venvNotFound(t *testing.T) {
	expectedError := fmt.Sprintf("can't find %q, check if venv is created", interpreterPath("bad_path"))

	b := loadYaml("databricks.yml", `
      experimental:
        pydabs:
          enabled: true
          venv_path: bad_path`)

	mutator := PythonMutator(PythonMutatorPhaseInit)
	diag := bundle.Apply(context.Background(), b, mutator)

	assert.EqualError(t, diag.Error(), expectedError)
}

type createOverrideVisitorTestCase struct {
	name        string
	updatePath  dyn.Path
	deletePath  dyn.Path
	insertPath  dyn.Path
	phase       phase
	updateError error
	deleteError error
	insertError error
}

func TestCreateOverrideVisitor(t *testing.T) {
	left := dyn.V(42)
	right := dyn.V(1337)

	testCases := []createOverrideVisitorTestCase{
		{
			name:        "load: can't change an existing job",
			phase:       PythonMutatorPhaseLoad,
			updatePath:  dyn.MustPathFromString("resources.jobs.job0.name"),
			deletePath:  dyn.MustPathFromString("resources.jobs.job0.name"),
			insertPath:  dyn.MustPathFromString("resources.jobs.job0.name"),
			deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (delete)"),
			insertError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (insert)"),
			updateError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (update)"),
		},
		{
			name:        "load: can't delete an existing job",
			phase:       PythonMutatorPhaseLoad,
			deletePath:  dyn.MustPathFromString("resources.jobs.job0"),
			deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"),
		},
		{
			name:        "load: can insert 'resources'",
			phase:       PythonMutatorPhaseLoad,
			insertPath:  dyn.MustPathFromString("resources"),
			insertError: nil,
		},
		{
			name:        "load: can insert 'resources.jobs'",
			phase:       PythonMutatorPhaseLoad,
			insertPath:  dyn.MustPathFromString("resources.jobs"),
			insertError: nil,
		},
		{
			name:        "load: can insert a job",
			phase:       PythonMutatorPhaseLoad,
			insertPath:  dyn.MustPathFromString("resources.jobs.job0"),
			insertError: nil,
		},
		{
			name:        "load: can't change include",
			phase:       PythonMutatorPhaseLoad,
			deletePath:  dyn.MustPathFromString("include[0]"),
			insertPath:  dyn.MustPathFromString("include[0]"),
			updatePath:  dyn.MustPathFromString("include[0]"),
			deleteError: fmt.Errorf("unexpected change at \"include[0]\" (delete)"),
			insertError: fmt.Errorf("unexpected change at \"include[0]\" (insert)"),
			updateError: fmt.Errorf("unexpected change at \"include[0]\" (update)"),
		},
		{
			name:        "init: can change an existing job",
			phase:       PythonMutatorPhaseInit,
			updatePath:  dyn.MustPathFromString("resources.jobs.job0.name"),
			deletePath:  dyn.MustPathFromString("resources.jobs.job0.name"),
			insertPath:  dyn.MustPathFromString("resources.jobs.job0.name"),
			deleteError: nil,
			insertError: nil,
			updateError: nil,
		},
		{
			name:        "init: can't delete an existing job",
			phase:       PythonMutatorPhaseInit,
			deletePath:  dyn.MustPathFromString("resources.jobs.job0"),
			deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"),
		},
		{
			name:        "init: can insert 'resources'",
			phase:       PythonMutatorPhaseInit,
			insertPath:  dyn.MustPathFromString("resources"),
			insertError: nil,
		},
		{
			name:        "init: can insert 'resources.jobs'",
			phase:       PythonMutatorPhaseInit,
			insertPath:  dyn.MustPathFromString("resources.jobs"),
			insertError: nil,
		},
		{
			name:        "init: can insert a job",
			phase:       PythonMutatorPhaseInit,
			insertPath:  dyn.MustPathFromString("resources.jobs.job0"),
			insertError: nil,
		},
		{
			name:        "init: can't change include",
			phase:       PythonMutatorPhaseInit,
			deletePath:  dyn.MustPathFromString("include[0]"),
			insertPath:  dyn.MustPathFromString("include[0]"),
			updatePath:  dyn.MustPathFromString("include[0]"),
			deleteError: fmt.Errorf("unexpected change at \"include[0]\" (delete)"),
			insertError: fmt.Errorf("unexpected change at \"include[0]\" (insert)"),
			updateError: fmt.Errorf("unexpected change at \"include[0]\" (update)"),
		},
	}

	for _, tc := range testCases {
		visitor, err := createOverrideVisitor(context.Background(), tc.phase)
		if err != nil {
			t.Fatalf("create visitor failed: %v", err)
		}

		if tc.updatePath != nil {
			t.Run(tc.name+"-update", func(t *testing.T) {
				out, err := visitor.VisitUpdate(tc.updatePath, left, right)

				if tc.updateError != nil {
					assert.Equal(t, tc.updateError, err)
				} else {
					assert.NoError(t, err)
					assert.Equal(t, right, out)
				}
			})
		}

		if tc.deletePath != nil {
			t.Run(tc.name+"-delete", func(t *testing.T) {
				err := visitor.VisitDelete(tc.deletePath, left)

				if tc.deleteError != nil {
					assert.Equal(t, tc.deleteError, err)
				} else {
					assert.NoError(t, err)
				}
			})
		}

		if tc.insertPath != nil {
			t.Run(tc.name+"-insert", func(t *testing.T) {
				out, err := visitor.VisitInsert(tc.insertPath, right)

				if tc.insertError != nil {
					assert.Equal(t, tc.insertError, err)
				} else {
					assert.NoError(t, err)
					assert.Equal(t, right, out)
				}
			})
		}
	}
}

type overrideVisitorOmitemptyTestCase struct {
	name        string
	path        dyn.Path
	left        dyn.Value
	phases      []phase
	expectedErr error
}

func TestCreateOverrideVisitor_omitempty(t *testing.T) {
	// PyDABs can omit empty sequences/mappings in its output because we don't track them
	// as optional: there is no semantic difference between empty and missing, so we keep
	// such values as they were before PyDABs deleted them.

	allPhases := []phase{PythonMutatorPhaseLoad, PythonMutatorPhaseInit}
	location := dyn.Location{
		File:   "databricks.yml",
		Line:   10,
		Column: 20,
	}

	testCases := []overrideVisitorOmitemptyTestCase{
		{
			// This case doesn't occur in practice; it is included for completeness.
			name:        "undo delete of empty variables",
			path:        dyn.MustPathFromString("variables"),
			left:        dyn.NewValue([]dyn.Value{}, []dyn.Location{location}),
			expectedErr: merge.ErrOverrideUndoDelete,
			phases:      allPhases,
		},
		{
			name:        "undo delete of empty job clusters",
			path:        dyn.MustPathFromString("resources.jobs.job0.job_clusters"),
			left:        dyn.NewValue([]dyn.Value{}, []dyn.Location{location}),
			expectedErr: merge.ErrOverrideUndoDelete,
			phases:      allPhases,
		},
		{
			name:        "allow delete of non-empty job clusters",
			path:        dyn.MustPathFromString("resources.jobs.job0.job_clusters"),
			left:        dyn.NewValue([]dyn.Value{dyn.NewValue("abc", []dyn.Location{location})}, []dyn.Location{location}),
			expectedErr: nil,
			// Deletions aren't allowed in the 'load' phase.
			phases: []phase{PythonMutatorPhaseInit},
		},
		{
			name:        "undo delete of empty tags",
			path:        dyn.MustPathFromString("resources.jobs.job0.tags"),
			left:        dyn.NewValue(map[string]dyn.Value{}, []dyn.Location{location}),
			expectedErr: merge.ErrOverrideUndoDelete,
			phases:      allPhases,
		},
		{
			name: "allow delete of non-empty tags",
			path: dyn.MustPathFromString("resources.jobs.job0.tags"),
			left: dyn.NewValue(map[string]dyn.Value{"dev": dyn.NewValue("true", []dyn.Location{location})}, []dyn.Location{location}),

			expectedErr: nil,
			// Deletions aren't allowed in the 'load' phase.
			phases: []phase{PythonMutatorPhaseInit},
		},
		{
			name:        "undo delete of nil",
			path:        dyn.MustPathFromString("resources.jobs.job0.tags"),
			left:        dyn.NilValue.WithLocations([]dyn.Location{location}),
			expectedErr: merge.ErrOverrideUndoDelete,
			phases:      allPhases,
		},
	}

	for _, tc := range testCases {
		for _, phase := range tc.phases {
			t.Run(tc.name+"-"+string(phase), func(t *testing.T) {
				visitor, err := createOverrideVisitor(context.Background(), phase)
				require.NoError(t, err)

				err = visitor.VisitDelete(tc.path, tc.left)

				assert.Equal(t, tc.expectedErr, err)
			})
		}
	}
}

func TestLoadDiagnosticsFile_nonExistent(t *testing.T) {
	// This is important behaviour; see the loadDiagnosticsFile docstring.
	_, err := loadDiagnosticsFile("non_existent_file.json")

	assert.Error(t, err)
}

func TestInterpreterPath(t *testing.T) {
	if runtime.GOOS == "windows" {
		assert.Equal(t, "venv\\Scripts\\python3.exe", interpreterPath("venv"))
	} else {
		assert.Equal(t, "venv/bin/python3", interpreterPath("venv"))
	}
}

func withProcessStub(t *testing.T, args []string, output string, diagnostics string) context.Context {
	ctx := context.Background()
	ctx, stub := process.WithStub(ctx)

	t.Setenv(env.TempDirVariable, t.TempDir())

	// After we override the env variable, we always get the same cache dir as the mutator.
	cacheDir, err := createCacheDir(ctx)
	require.NoError(t, err)

	inputPath := filepath.Join(cacheDir, "input.json")
	outputPath := filepath.Join(cacheDir, "output.json")
	diagnosticsPath := filepath.Join(cacheDir, "diagnostics.json")

	args = append(args, "--input", inputPath)
	args = append(args, "--output", outputPath)
	args = append(args, "--diagnostics", diagnosticsPath)

	stub.WithCallback(func(actual *exec.Cmd) error {
		_, err := os.Stat(inputPath)
		assert.NoError(t, err)

		if reflect.DeepEqual(actual.Args, args) {
			err := os.WriteFile(outputPath, []byte(output), 0600)
			require.NoError(t, err)

			err = os.WriteFile(diagnosticsPath, []byte(diagnostics), 0600)
			require.NoError(t, err)

			return nil
		} else {
			return fmt.Errorf("unexpected command: %v", actual.Args)
		}
	})

	return ctx
}

func loadYaml(name string, content string) *bundle.Bundle {
	v, diag := config.LoadFromBytes(name, []byte(content))

	if diag.Error() != nil {
		panic(diag.Error())
	}

	return &bundle.Bundle{
		Config: *v,
	}
}

func withFakeVEnv(t *testing.T, path string) {
	interpreterPath := interpreterPath(path)

	cwd, err := os.Getwd()
	if err != nil {
		panic(err)
	}

	if err := os.Chdir(t.TempDir()); err != nil {
		panic(err)
	}

	err = os.MkdirAll(filepath.Dir(interpreterPath), 0755)
	if err != nil {
		panic(err)
	}

	err = os.WriteFile(interpreterPath, []byte(""), 0755)
	if err != nil {
		panic(err)
	}

	t.Cleanup(func() {
		if err := os.Chdir(cwd); err != nil {
			panic(err)
		}
	})
}
@@ -5,6 +5,7 @@ import (
 	"fmt"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
 	"golang.org/x/sync/errgroup"
 )
@@ -15,7 +16,7 @@ func ResolveResourceReferences() bundle.Mutator {
 	return &resolveResourceReferences{}
 }

-func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	errs, errCtx := errgroup.WithContext(ctx)

 	for k := range b.Config.Variables {
@@ -40,7 +41,7 @@ func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle)
 		})
 	}

-	return errs.Wait()
+	return diag.FromErr(errs.Wait())
 }

 func (*resolveResourceReferences) Name() string {
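The signature change above follows the repo-wide migration of mutators from returning error to returning diag.Diagnostics. The adapter is diag.FromErr, which wraps a plain error as diagnostics, so errgroup-based code keeps its structure and only the return statement changes. A minimal sketch of the pattern, with an illustrative helper name:

	// Sketch of the adapter pattern; resolveAll is a hypothetical helper.
	func resolveAll(ctx context.Context) diag.Diagnostics {
		errs, _ := errgroup.WithContext(ctx)
		// ...schedule one lookup per variable on errs...
		return diag.FromErr(errs.Wait()) // wrap the combined error, if any, as diagnostics
	}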
@@ -8,11 +8,13 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/variable"
+	"github.com/databricks/cli/libs/env"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"

 	"github.com/databricks/databricks-sdk-go/experimental/mocks"
 	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/databricks/databricks-sdk-go/service/iam"
 )

 func TestResolveClusterReference(t *testing.T) {
@@ -33,7 +35,7 @@ func TestResolveClusterReference(t *testing.T) {
 					},
 				},
 				"some-variable": {
-					Value: &justString,
+					Value: justString,
 				},
 			},
 		},
@@ -49,10 +51,10 @@ func TestResolveClusterReference(t *testing.T) {
 		ClusterId: "9876-5432-xywz",
 	}, nil)

-	err := bundle.Apply(context.Background(), b, ResolveResourceReferences())
-	require.NoError(t, err)
-	require.Equal(t, "1234-5678-abcd", *b.Config.Variables["my-cluster-id-1"].Value)
-	require.Equal(t, "9876-5432-xywz", *b.Config.Variables["my-cluster-id-2"].Value)
+	diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())
+	require.NoError(t, diags.Error())
+	require.Equal(t, "1234-5678-abcd", b.Config.Variables["my-cluster-id-1"].Value)
+	require.Equal(t, "9876-5432-xywz", b.Config.Variables["my-cluster-id-2"].Value)
 }

 func TestResolveNonExistentClusterReference(t *testing.T) {
@@ -67,7 +69,7 @@ func TestResolveNonExistentClusterReference(t *testing.T) {
 					},
 				},
 				"some-variable": {
-					Value: &justString,
+					Value: justString,
 				},
 			},
 		},
@@ -78,8 +80,8 @@ func TestResolveNonExistentClusterReference(t *testing.T) {
 	clusterApi := m.GetMockClustersAPI()
 	clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef).Return(nil, fmt.Errorf("ClusterDetails named '%s' does not exist", clusterRef))

-	err := bundle.Apply(context.Background(), b, ResolveResourceReferences())
-	require.ErrorContains(t, err, "failed to resolve cluster: Random, err: ClusterDetails named 'Random' does not exist")
+	diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())
+	require.ErrorContains(t, diags.Error(), "failed to resolve cluster: Random, err: ClusterDetails named 'Random' does not exist")
 }

 func TestNoLookupIfVariableIsSet(t *testing.T) {
@@ -101,7 +103,119 @@ func TestNoLookupIfVariableIsSet(t *testing.T) {

 	b.Config.Variables["my-cluster-id"].Set("random value")

-	err := bundle.Apply(context.Background(), b, ResolveResourceReferences())
-	require.NoError(t, err)
-	require.Equal(t, "random value", *b.Config.Variables["my-cluster-id"].Value)
+	diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())
+	require.NoError(t, diags.Error())
+	require.Equal(t, "random value", b.Config.Variables["my-cluster-id"].Value)
+}
+
+func TestResolveServicePrincipal(t *testing.T) {
+	spName := "Some SP name"
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Variables: map[string]*variable.Variable{
+				"my-sp": {
+					Lookup: &variable.Lookup{
+						ServicePrincipal: spName,
+					},
+				},
+			},
+		},
+	}
+
+	m := mocks.NewMockWorkspaceClient(t)
+	b.SetWorkpaceClient(m.WorkspaceClient)
+	spApi := m.GetMockServicePrincipalsAPI()
+	spApi.EXPECT().GetByDisplayName(mock.Anything, spName).Return(&iam.ServicePrincipal{
+		Id:            "1234",
+		ApplicationId: "app-1234",
+	}, nil)
+
+	diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())
+	require.NoError(t, diags.Error())
+	require.Equal(t, "app-1234", b.Config.Variables["my-sp"].Value)
+}
+
+func TestResolveVariableReferencesInVariableLookups(t *testing.T) {
+	s := "bar"
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Bundle: config.Bundle{
+				Target: "dev",
+			},
+			Variables: map[string]*variable.Variable{
+				"foo": {
+					Value: s,
+				},
+				"lookup": {
+					Lookup: &variable.Lookup{
+						Cluster: "cluster-${var.foo}-${bundle.target}",
+					},
+				},
+			},
+		},
+	}
+
+	m := mocks.NewMockWorkspaceClient(t)
+	b.SetWorkpaceClient(m.WorkspaceClient)
+	clusterApi := m.GetMockClustersAPI()
+	clusterApi.EXPECT().GetByClusterName(mock.Anything, "cluster-bar-dev").Return(&compute.ClusterDetails{
+		ClusterId: "1234-5678-abcd",
+	}, nil)
+
+	diags := bundle.Apply(context.Background(), b, bundle.Seq(ResolveVariableReferencesInLookup(), ResolveResourceReferences()))
+	require.NoError(t, diags.Error())
+	require.Equal(t, "cluster-bar-dev", b.Config.Variables["lookup"].Lookup.Cluster)
+	require.Equal(t, "1234-5678-abcd", b.Config.Variables["lookup"].Value)
+}
+
+func TestResolveLookupVariableReferencesInVariableLookups(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Variables: map[string]*variable.Variable{
+				"another_lookup": {
+					Lookup: &variable.Lookup{
+						Cluster: "cluster",
+					},
+				},
+				"lookup": {
+					Lookup: &variable.Lookup{
+						Cluster: "cluster-${var.another_lookup}",
+					},
+				},
+			},
+		},
+	}
+
+	m := mocks.NewMockWorkspaceClient(t)
+	b.SetWorkpaceClient(m.WorkspaceClient)
+
+	diags := bundle.Apply(context.Background(), b, bundle.Seq(ResolveVariableReferencesInLookup(), ResolveResourceReferences()))
+	require.ErrorContains(t, diags.Error(), "lookup variables cannot contain references to another lookup variables")
+}
+
+func TestNoResolveLookupIfVariableSetWithEnvVariable(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Bundle: config.Bundle{
+				Target: "dev",
+			},
+			Variables: map[string]*variable.Variable{
+				"lookup": {
+					Lookup: &variable.Lookup{
+						Cluster: "cluster-${bundle.target}",
+					},
+				},
+			},
+		},
+	}
+
+	m := mocks.NewMockWorkspaceClient(t)
+	b.SetWorkpaceClient(m.WorkspaceClient)
+
+	ctx := context.Background()
+	ctx = env.Set(ctx, "BUNDLE_VAR_lookup", "1234-5678-abcd")
+
+	diags := bundle.Apply(ctx, b, bundle.Seq(SetVariables(), ResolveVariableReferencesInLookup(), ResolveResourceReferences()))
+	require.NoError(t, diags.Error())
+	require.Equal(t, "1234-5678-abcd", b.Config.Variables["lookup"].Value)
 }
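The tests above exercise lookup variables end to end: a variable declares what to look up by name, and ResolveResourceReferences fills in its value from the workspace. In bundle YAML, such variables might be declared like this (a sketch; names and the exact YAML keys are illustrative, inferred from the Lookup fields used in the tests):

	variables:
	  my-cluster-id:
	    description: Resolved from the cluster name at deploy time
	    lookup:
	      cluster: "Shared cluster"
	  my-sp:
	    lookup:
	      service_principal: "Some SP name"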
@ -0,0 +1,193 @@
|
||||||
|
package mutator
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/databricks/cli/bundle"
|
||||||
|
"github.com/databricks/cli/bundle/config/variable"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
|
"github.com/databricks/cli/libs/dyn"
|
||||||
|
"github.com/databricks/cli/libs/dyn/convert"
|
||||||
|
"github.com/databricks/cli/libs/dyn/dynvar"
|
||||||
|
"github.com/databricks/cli/libs/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
type resolveVariableReferences struct {
|
||||||
|
prefixes []string
|
||||||
|
pattern dyn.Pattern
|
||||||
|
lookupFn func(dyn.Value, dyn.Path) (dyn.Value, error)
|
||||||
|
skipFn func(dyn.Value) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func ResolveVariableReferences(prefixes ...string) bundle.Mutator {
|
||||||
|
return &resolveVariableReferences{prefixes: prefixes, lookupFn: lookup}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ResolveVariableReferencesInLookup() bundle.Mutator {
|
||||||
|
return &resolveVariableReferences{prefixes: []string{
|
||||||
|
"bundle",
|
||||||
|
"workspace",
|
||||||
|
"variables",
|
||||||
|
}, pattern: dyn.NewPattern(dyn.Key("variables"), dyn.AnyKey(), dyn.Key("lookup")), lookupFn: lookupForVariables}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ResolveVariableReferencesInComplexVariables() bundle.Mutator {
|
||||||
|
return &resolveVariableReferences{prefixes: []string{
|
||||||
|
"bundle",
|
||||||
|
"workspace",
|
||||||
|
"variables",
|
||||||
|
},
|
||||||
|
pattern: dyn.NewPattern(dyn.Key("variables"), dyn.AnyKey(), dyn.Key("value")),
|
||||||
|
lookupFn: lookupForComplexVariables,
|
||||||
|
skipFn: skipResolvingInNonComplexVariables,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func lookup(v dyn.Value, path dyn.Path) (dyn.Value, error) {
|
||||||
|
// Future opportunity: if we lookup this path in both the given root
|
||||||
|
// and the synthesized root, we know if it was explicitly set or implied to be empty.
|
||||||
|
// Then we can emit a warning if it was not explicitly set.
|
||||||
|
return dyn.GetByPath(v, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func lookupForComplexVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) {
|
||||||
|
	if path[0].Key() != "variables" {
		return lookup(v, path)
	}

	varV, err := dyn.GetByPath(v, path[:len(path)-1])
	if err != nil {
		return dyn.InvalidValue, err
	}

	var vv variable.Variable
	err = convert.ToTyped(&vv, varV)
	if err != nil {
		return dyn.InvalidValue, err
	}

	if vv.Type == variable.VariableTypeComplex {
		return dyn.InvalidValue, fmt.Errorf("complex variables cannot contain references to another complex variables")
	}

	return lookup(v, path)
}

func skipResolvingInNonComplexVariables(v dyn.Value) bool {
	switch v.Kind() {
	case dyn.KindMap, dyn.KindSequence:
		return false
	default:
		return true
	}
}

func lookupForVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) {
	if path[0].Key() != "variables" {
		return lookup(v, path)
	}

	varV, err := dyn.GetByPath(v, path[:len(path)-1])
	if err != nil {
		return dyn.InvalidValue, err
	}

	var vv variable.Variable
	err = convert.ToTyped(&vv, varV)
	if err != nil {
		return dyn.InvalidValue, err
	}

	if vv.Lookup != nil && vv.Lookup.String() != "" {
		return dyn.InvalidValue, fmt.Errorf("lookup variables cannot contain references to another lookup variables")
	}

	return lookup(v, path)
}

func (*resolveVariableReferences) Name() string {
	return "ResolveVariableReferences"
}

func (m *resolveVariableReferences) Validate(ctx context.Context, b *bundle.Bundle) error {
	return nil
}

func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	prefixes := make([]dyn.Path, len(m.prefixes))
	for i, prefix := range m.prefixes {
		prefixes[i] = dyn.MustPathFromString(prefix)
	}

	// The path ${var.foo} is a shorthand for ${variables.foo.value}.
	// We rewrite it here to make the resolution logic simpler.
	varPath := dyn.NewPath(dyn.Key("var"))

	err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) {
		// Synthesize a copy of the root that has all fields that are present in the type
		// but not set in the dynamic value set to their corresponding empty value.
		// This enables users to interpolate variable references to fields that haven't
		// been explicitly set in the dynamic value.
		//
		// For example: ${bundle.git.origin_url} should resolve to an empty string
		// if a bundle isn't located in a Git repository (yet).
		//
		// This is consistent with the behavior prior to using the dynamic value system.
		//
		// We can ignore the diagnostics return value because we know that the dynamic value
		// has already been normalized when it was first loaded from the configuration file.
		normalized, _ := convert.Normalize(b.Config, root, convert.IncludeMissingFields)

		// If the pattern is nil, we resolve references in the entire configuration.
		root, err := dyn.MapByPattern(root, m.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
			// Resolve variable references in all values.
			return dynvar.Resolve(v, func(path dyn.Path) (dyn.Value, error) {
				// Rewrite the shorthand path ${var.foo} into ${variables.foo.value}.
				if path.HasPrefix(varPath) {
					newPath := dyn.NewPath(
						dyn.Key("variables"),
						path[1],
						dyn.Key("value"),
					)

					if len(path) > 2 {
						newPath = newPath.Append(path[2:]...)
					}

					path = newPath
				}

				// Perform resolution only if the path starts with one of the specified prefixes.
				for _, prefix := range prefixes {
					if path.HasPrefix(prefix) {
						// Skip resolution if there is a skip function and it returns true.
						if m.skipFn != nil && m.skipFn(v) {
							return dyn.InvalidValue, dynvar.ErrSkipResolution
						}
						return m.lookupFn(normalized, path)
					}
				}

				return dyn.InvalidValue, dynvar.ErrSkipResolution
			})
		})
		if err != nil {
			return dyn.InvalidValue, err
		}

		// Normalize the result because variable resolution may have been applied to non-string fields.
		// For example, a variable reference may have been resolved to an integer.
		root, diags := convert.Normalize(b.Config, root)
		for _, diag := range diags {
			// This occurs when a variable's resolved value is incompatible with the field's type.
			// Log a warning until we have a better way to surface these diagnostics to the user.
			log.Warnf(ctx, "normalization diagnostic: %s", diag.Summary)
		}
		return root, nil
	})

	return diag.FromErr(err)
}
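The IncludeMissingFields normalization above is what lets a reference such as ${bundle.git.origin_url} resolve to an empty string when the field was never set. Below is a minimal standalone sketch of that effect, illustrative only and not part of this diff; the struct and its tag are hypothetical, and it assumes the dyn and convert helpers behave as they are used in the code above.

package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/convert"
)

// git mirrors the shape of a typed config struct with a field the user never set.
type git struct {
	OriginURL string `json:"origin_url"`
}

func main() {
	// A dynamic value with no fields set, as if the configuration file
	// never mentioned origin_url.
	v := dyn.V(map[string]dyn.Value{})

	// Normalizing against the typed struct with IncludeMissingFields
	// materializes origin_url as its empty value.
	nv, _ := convert.Normalize(git{}, v, convert.IncludeMissingFields)

	// The field now exists and can be the target of an interpolation.
	field, err := dyn.GetByPath(nv, dyn.NewPath(dyn.Key("origin_url")))
	if err == nil {
		fmt.Printf("origin_url resolves to %q\n", field.MustString()) // ""
	}
}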
@ -0,0 +1,436 @@
package mutator

import (
	"context"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/cli/bundle/config/variable"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestResolveVariableReferences(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Bundle: config.Bundle{
				Name: "example",
			},
			Workspace: config.Workspace{
				RootPath: "${bundle.name}/bar",
				FilePath: "${workspace.root_path}/baz",
			},
		},
	}

	// Apply with an invalid prefix. This should not change the workspace root path.
	diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("doesntexist"))
	require.NoError(t, diags.Error())
	require.Equal(t, "${bundle.name}/bar", b.Config.Workspace.RootPath)
	require.Equal(t, "${workspace.root_path}/baz", b.Config.Workspace.FilePath)

	// Apply with a valid prefix. This should change the workspace root path.
	diags = bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle", "workspace"))
	require.NoError(t, diags.Error())
	require.Equal(t, "example/bar", b.Config.Workspace.RootPath)
	require.Equal(t, "example/bar/baz", b.Config.Workspace.FilePath)
}

func TestResolveVariableReferencesToBundleVariables(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Bundle: config.Bundle{
				Name: "example",
			},
			Workspace: config.Workspace{
				RootPath: "${bundle.name}/${var.foo}",
			},
			Variables: map[string]*variable.Variable{
				"foo": {
					Value: "bar",
				},
			},
		},
	}

	// Apply with a valid prefix. This should change the workspace root path.
	diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle", "variables"))
	require.NoError(t, diags.Error())
	require.Equal(t, "example/bar", b.Config.Workspace.RootPath)
}

func TestResolveVariableReferencesToEmptyFields(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Bundle: config.Bundle{
				Name: "example",
				Git: config.Git{
					Branch: "",
				},
			},
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job1": {
						JobSettings: &jobs.JobSettings{
							Tags: map[string]string{
								"git_branch": "${bundle.git.branch}",
							},
						},
					},
				},
			},
		},
	}

	// Apply for the bundle prefix.
	diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle"))
	require.NoError(t, diags.Error())

	// The job settings should have been interpolated to an empty string.
	require.Equal(t, "", b.Config.Resources.Jobs["job1"].JobSettings.Tags["git_branch"])
}

func TestResolveVariableReferencesForPrimitiveNonStringFields(t *testing.T) {
	var diags diag.Diagnostics

	b := &bundle.Bundle{
		Config: config.Root{
			Variables: map[string]*variable.Variable{
				"no_alert_for_canceled_runs": {},
				"no_alert_for_skipped_runs":  {},
				"min_workers":                {},
				"max_workers":                {},
				"spot_bid_max_price":         {},
			},
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job1": {
						JobSettings: &jobs.JobSettings{
							NotificationSettings: &jobs.JobNotificationSettings{
								NoAlertForCanceledRuns: false,
								NoAlertForSkippedRuns:  false,
							},
							Tasks: []jobs.Task{
								{
									NewCluster: &compute.ClusterSpec{
										Autoscale: &compute.AutoScale{
											MinWorkers: 0,
											MaxWorkers: 0,
										},
										AzureAttributes: &compute.AzureAttributes{
											SpotBidMaxPrice: 0.0,
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}

	ctx := context.Background()

	// Initialize the variables.
	diags = bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
		err := b.Config.InitializeVariables([]string{
			"no_alert_for_canceled_runs=true",
			"no_alert_for_skipped_runs=true",
			"min_workers=1",
			"max_workers=2",
			"spot_bid_max_price=0.5",
		})
		return diag.FromErr(err)
	})
	require.NoError(t, diags.Error())

	// Assign the variables to the dynamic configuration.
	diags = bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
		err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
			var p dyn.Path
			var err error

			// Set the notification settings.
			p = dyn.MustPathFromString("resources.jobs.job1.notification_settings")
			v, err = dyn.SetByPath(v, p.Append(dyn.Key("no_alert_for_canceled_runs")), dyn.V("${var.no_alert_for_canceled_runs}"))
			require.NoError(t, err)
			v, err = dyn.SetByPath(v, p.Append(dyn.Key("no_alert_for_skipped_runs")), dyn.V("${var.no_alert_for_skipped_runs}"))
			require.NoError(t, err)

			// Set the min and max workers.
			p = dyn.MustPathFromString("resources.jobs.job1.tasks[0].new_cluster.autoscale")
			v, err = dyn.SetByPath(v, p.Append(dyn.Key("min_workers")), dyn.V("${var.min_workers}"))
			require.NoError(t, err)
			v, err = dyn.SetByPath(v, p.Append(dyn.Key("max_workers")), dyn.V("${var.max_workers}"))
			require.NoError(t, err)

			// Set the spot bid max price.
			p = dyn.MustPathFromString("resources.jobs.job1.tasks[0].new_cluster.azure_attributes")
			v, err = dyn.SetByPath(v, p.Append(dyn.Key("spot_bid_max_price")), dyn.V("${var.spot_bid_max_price}"))
			require.NoError(t, err)

			return v, nil
		})
		return diag.FromErr(err)
	})
	require.NoError(t, diags.Error())

	// Apply for the variable prefix. This should resolve the variables to their values.
	diags = bundle.Apply(context.Background(), b, ResolveVariableReferences("variables"))
	require.NoError(t, diags.Error())
	assert.Equal(t, true, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForCanceledRuns)
	assert.Equal(t, true, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForSkippedRuns)
	assert.Equal(t, 1, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.Autoscale.MinWorkers)
	assert.Equal(t, 2, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.Autoscale.MaxWorkers)
	assert.Equal(t, 0.5, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.AzureAttributes.SpotBidMaxPrice)
}

func TestResolveComplexVariable(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Bundle: config.Bundle{
				Name: "example",
			},
			Variables: map[string]*variable.Variable{
				"cluster": {
					Value: map[string]any{
						"node_type_id": "Standard_DS3_v2",
						"num_workers":  2,
					},
					Type: variable.VariableTypeComplex,
				},
			},
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job1": {
						JobSettings: &jobs.JobSettings{
							JobClusters: []jobs.JobCluster{
								{
									NewCluster: compute.ClusterSpec{
										NodeTypeId: "random",
									},
								},
							},
						},
					},
				},
			},
		},
	}

	ctx := context.Background()

	// Assign the variables to the dynamic configuration.
	diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
		err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
			var p dyn.Path
			var err error

			p = dyn.MustPathFromString("resources.jobs.job1.job_clusters[0]")
			v, err = dyn.SetByPath(v, p.Append(dyn.Key("new_cluster")), dyn.V("${var.cluster}"))
			require.NoError(t, err)

			return v, nil
		})
		return diag.FromErr(err)
	})
	require.NoError(t, diags.Error())

	diags = bundle.Apply(ctx, b, ResolveVariableReferences("bundle", "workspace", "variables"))
	require.NoError(t, diags.Error())
	require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["job1"].JobSettings.JobClusters[0].NewCluster.NodeTypeId)
	require.Equal(t, 2, b.Config.Resources.Jobs["job1"].JobSettings.JobClusters[0].NewCluster.NumWorkers)
}

func TestResolveComplexVariableReferencesToFields(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Bundle: config.Bundle{
				Name: "example",
			},
			Variables: map[string]*variable.Variable{
				"cluster": {
					Value: map[string]any{
						"node_type_id": "Standard_DS3_v2",
						"num_workers":  2,
					},
					Type: variable.VariableTypeComplex,
				},
			},
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job1": {
						JobSettings: &jobs.JobSettings{
							JobClusters: []jobs.JobCluster{
								{
									NewCluster: compute.ClusterSpec{
										NodeTypeId: "random",
									},
								},
							},
						},
					},
				},
			},
		},
	}

	ctx := context.Background()

	// Assign the variables to the dynamic configuration.
	diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
		err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
			var p dyn.Path
			var err error

			p = dyn.MustPathFromString("resources.jobs.job1.job_clusters[0].new_cluster")
			v, err = dyn.SetByPath(v, p.Append(dyn.Key("node_type_id")), dyn.V("${var.cluster.node_type_id}"))
			require.NoError(t, err)

			return v, nil
		})
		return diag.FromErr(err)
	})
	require.NoError(t, diags.Error())

	diags = bundle.Apply(ctx, b, ResolveVariableReferences("bundle", "workspace", "variables"))
	require.NoError(t, diags.Error())
	require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["job1"].JobSettings.JobClusters[0].NewCluster.NodeTypeId)
}

func TestResolveComplexVariableReferencesWithComplexVariablesError(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Bundle: config.Bundle{
				Name: "example",
			},
			Variables: map[string]*variable.Variable{
				"cluster": {
					Value: map[string]any{
						"node_type_id": "Standard_DS3_v2",
						"num_workers":  2,
						"spark_conf":   "${var.spark_conf}",
					},
					Type: variable.VariableTypeComplex,
				},
				"spark_conf": {
					Value: map[string]any{
						"spark.executor.memory": "4g",
						"spark.executor.cores":  "2",
					},
					Type: variable.VariableTypeComplex,
				},
			},
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job1": {
						JobSettings: &jobs.JobSettings{
							JobClusters: []jobs.JobCluster{
								{
									NewCluster: compute.ClusterSpec{
										NodeTypeId: "random",
									},
								},
							},
						},
					},
				},
			},
		},
	}

	ctx := context.Background()

	// Assign the variables to the dynamic configuration.
	diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
		err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
			var p dyn.Path
			var err error

			p = dyn.MustPathFromString("resources.jobs.job1.job_clusters[0]")
			v, err = dyn.SetByPath(v, p.Append(dyn.Key("new_cluster")), dyn.V("${var.cluster}"))
			require.NoError(t, err)

			return v, nil
		})
		return diag.FromErr(err)
	})
	require.NoError(t, diags.Error())

	diags = bundle.Apply(ctx, b, bundle.Seq(ResolveVariableReferencesInComplexVariables(), ResolveVariableReferences("bundle", "workspace", "variables")))
	require.ErrorContains(t, diags.Error(), "complex variables cannot contain references to another complex variables")
}

func TestResolveComplexVariableWithVarReference(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Bundle: config.Bundle{
				Name: "example",
			},
			Variables: map[string]*variable.Variable{
				"package_version": {
					Value: "1.0.0",
				},
				"cluster_libraries": {
					Value: []map[string]any{
						{
							"pypi": map[string]string{
								"package": "cicd_template==${var.package_version}",
							},
						},
					},
					Type: variable.VariableTypeComplex,
				},
			},
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job1": {
						JobSettings: &jobs.JobSettings{
							Tasks: []jobs.Task{
								{
									Libraries: []compute.Library{},
								},
							},
						},
					},
				},
			},
		},
	}

	ctx := context.Background()

	// Assign the variables to the dynamic configuration.
	diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
		err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
			var p dyn.Path
			var err error

			p = dyn.MustPathFromString("resources.jobs.job1.tasks[0]")
			v, err = dyn.SetByPath(v, p.Append(dyn.Key("libraries")), dyn.V("${var.cluster_libraries}"))
			require.NoError(t, err)

			return v, nil
		})
		return diag.FromErr(err)
	})
	require.NoError(t, diags.Error())

	diags = bundle.Apply(ctx, b, bundle.Seq(
		ResolveVariableReferencesInComplexVariables(),
		ResolveVariableReferences("bundle", "workspace", "variables"),
	))
	require.NoError(t, diags.Error())
	require.Equal(t, "cicd_template==1.0.0", b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].Libraries[0].Pypi.Package)
}
@ -0,0 +1,61 @@
package mutator

import (
	"context"
	"path/filepath"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
)

type rewriteSyncPaths struct{}

func RewriteSyncPaths() bundle.Mutator {
	return &rewriteSyncPaths{}
}

func (m *rewriteSyncPaths) Name() string {
	return "RewriteSyncPaths"
}

// makeRelativeTo returns a dyn.MapFunc that joins the relative path
// of the file it was defined in w.r.t. the bundle root path, with
// the contents of the string node.
//
// For example:
//   - The bundle root is /foo
//   - The configuration file that defines the string node is at /foo/bar/baz.yml
//   - The string node contains "somefile.*"
//
// Then the resulting value will be "bar/somefile.*".
func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc {
	return func(_ dyn.Path, v dyn.Value) (dyn.Value, error) {
		dir := filepath.Dir(v.Location().File)
		rel, err := filepath.Rel(root, dir)
		if err != nil {
			return dyn.InvalidValue, err
		}

		return dyn.NewValue(filepath.Join(rel, v.MustString()), v.Locations()), nil
	}
}

func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
		return dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (nv dyn.Value, err error) {
			v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
			if err != nil {
				return dyn.InvalidValue, err
			}
			v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
			if err != nil {
				return dyn.InvalidValue, err
			}
			return v, nil
		})
	})

	return diag.FromErr(err)
}
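The worked example in the makeRelativeTo comment can be verified with the standard library alone. Here is an illustrative standalone sketch (not part of this diff) of the same path arithmetic:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Bundle root is /foo; the config file defining the string node is
	// /foo/bar/baz.yml; the node contains "somefile.*".
	root := "/foo"
	dir := filepath.Dir("/foo/bar/baz.yml") // "/foo/bar"

	rel, err := filepath.Rel(root, dir) // "bar"
	if err != nil {
		panic(err)
	}

	fmt.Println(filepath.Join(rel, "somefile.*")) // "bar/somefile.*"
}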
@ -0,0 +1,101 @@
package mutator_test

import (
	"context"
	"path/filepath"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/mutator"
	"github.com/databricks/cli/bundle/internal/bundletest"
	"github.com/stretchr/testify/assert"
)

func TestRewriteSyncPathsRelative(t *testing.T) {
	b := &bundle.Bundle{
		RootPath: ".",
		Config: config.Root{
			Sync: config.Sync{
				Include: []string{
					"foo",
					"bar",
				},
				Exclude: []string{
					"baz",
					"qux",
				},
			},
		},
	}

	bundletest.SetLocation(b, "sync.include[0]", "./file.yml")
	bundletest.SetLocation(b, "sync.include[1]", "./a/file.yml")
	bundletest.SetLocation(b, "sync.exclude[0]", "./a/b/file.yml")
	bundletest.SetLocation(b, "sync.exclude[1]", "./a/b/c/file.yml")

	diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
	assert.NoError(t, diags.Error())

	assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0])
	assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1])
	assert.Equal(t, filepath.Clean("a/b/baz"), b.Config.Sync.Exclude[0])
	assert.Equal(t, filepath.Clean("a/b/c/qux"), b.Config.Sync.Exclude[1])
}

func TestRewriteSyncPathsAbsolute(t *testing.T) {
	b := &bundle.Bundle{
		RootPath: "/tmp/dir",
		Config: config.Root{
			Sync: config.Sync{
				Include: []string{
					"foo",
					"bar",
				},
				Exclude: []string{
					"baz",
					"qux",
				},
			},
		},
	}

	bundletest.SetLocation(b, "sync.include[0]", "/tmp/dir/file.yml")
	bundletest.SetLocation(b, "sync.include[1]", "/tmp/dir/a/file.yml")
	bundletest.SetLocation(b, "sync.exclude[0]", "/tmp/dir/a/b/file.yml")
	bundletest.SetLocation(b, "sync.exclude[1]", "/tmp/dir/a/b/c/file.yml")

	diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
	assert.NoError(t, diags.Error())

	assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0])
	assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1])
	assert.Equal(t, filepath.Clean("a/b/baz"), b.Config.Sync.Exclude[0])
	assert.Equal(t, filepath.Clean("a/b/c/qux"), b.Config.Sync.Exclude[1])
}

func TestRewriteSyncPathsErrorPaths(t *testing.T) {
	t.Run("no sync block", func(t *testing.T) {
		b := &bundle.Bundle{
			RootPath: ".",
		}

		diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
		assert.NoError(t, diags.Error())
	})

	t.Run("empty include/exclude blocks", func(t *testing.T) {
		b := &bundle.Bundle{
			RootPath: ".",
			Config: config.Root{
				Sync: config.Sync{
					Include: []string{},
					Exclude: []string{},
				},
			},
		}

		diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
		assert.NoError(t, diags.Error())
	})
}
@ -2,19 +2,26 @@ package mutator
import (
	"context"
	"fmt"
	"slices"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/databricks-sdk-go/service/jobs"
)

type setRunAs struct {
}

// This mutator does two things:
//
// 1. Sets the run_as field for jobs to the value of the run_as field in the bundle.
//
// 2. Validates that the bundle run_as configuration is valid in the context of the bundle.
// If the run_as user is different from the current deployment user, DABs only
// supports a subset of resources.
func SetRunAs() bundle.Mutator {
	return &setRunAs{}
}

@ -23,10 +30,99 @@ func (m *setRunAs) Name() string {
	return "SetRunAs"
}

type errUnsupportedResourceTypeForRunAs struct {
	resourceType     string
	resourceLocation dyn.Location
	currentUser      string
	runAsUser        string
}

func (e errUnsupportedResourceTypeForRunAs) Error() string {
	return fmt.Sprintf("%s are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Please refer to the documentation at https://docs.databricks.com/dev-tools/bundles/run-as.html for more details. Location of the unsupported resource: %s. Current identity: %s. Run as identity: %s", e.resourceType, e.resourceLocation, e.currentUser, e.runAsUser)
}

type errBothSpAndUserSpecified struct {
	spName   string
	spLoc    dyn.Location
	userName string
	userLoc  dyn.Location
}

func (e errBothSpAndUserSpecified) Error() string {
	return fmt.Sprintf("run_as section must specify exactly one identity. A service_principal_name %q is specified at %s. A user_name %q is defined at %s", e.spName, e.spLoc, e.userName, e.userLoc)
}

func validateRunAs(b *bundle.Bundle) error {
	neitherSpecifiedErr := fmt.Errorf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s", b.Config.GetLocation("run_as"))

	// Error if neither service_principal_name nor user_name are specified, but the
	// run_as section is present.
	if b.Config.Value().Get("run_as").Kind() == dyn.KindNil {
		return neitherSpecifiedErr
	}

	// Error if one or both of service_principal_name and user_name are specified,
	// but with empty values.
	if b.Config.RunAs.ServicePrincipalName == "" && b.Config.RunAs.UserName == "" {
		return neitherSpecifiedErr
	}

	// Error if both service_principal_name and user_name are specified.
	runAs := b.Config.RunAs
	if runAs.UserName != "" && runAs.ServicePrincipalName != "" {
		return errBothSpAndUserSpecified{
			spName:   runAs.ServicePrincipalName,
			userName: runAs.UserName,
			spLoc:    b.Config.GetLocation("run_as.service_principal_name"),
			userLoc:  b.Config.GetLocation("run_as.user_name"),
		}
	}

	identity := runAs.ServicePrincipalName
	if identity == "" {
		identity = runAs.UserName
	}

	// All resources are supported if the run_as identity is the same as the current deployment identity.
	if identity == b.Config.Workspace.CurrentUser.UserName {
		return nil
	}

	// DLT pipelines do not support run_as in the API.
	if len(b.Config.Resources.Pipelines) > 0 {
		return errUnsupportedResourceTypeForRunAs{
			resourceType:     "pipelines",
			resourceLocation: b.Config.GetLocation("resources.pipelines"),
			currentUser:      b.Config.Workspace.CurrentUser.UserName,
			runAsUser:        identity,
		}
	}

	// Model serving endpoints do not support run_as in the API.
	if len(b.Config.Resources.ModelServingEndpoints) > 0 {
		return errUnsupportedResourceTypeForRunAs{
			resourceType:     "model_serving_endpoints",
			resourceLocation: b.Config.GetLocation("resources.model_serving_endpoints"),
			currentUser:      b.Config.Workspace.CurrentUser.UserName,
			runAsUser:        identity,
		}
	}

	// Monitors do not support run_as in the API.
	if len(b.Config.Resources.QualityMonitors) > 0 {
		return errUnsupportedResourceTypeForRunAs{
			resourceType:     "quality_monitors",
			resourceLocation: b.Config.GetLocation("resources.quality_monitors"),
			currentUser:      b.Config.Workspace.CurrentUser.UserName,
			runAsUser:        identity,
		}
	}

	return nil
}

func setRunAsForJobs(b *bundle.Bundle) {
	runAs := b.Config.RunAs
	if runAs == nil {
		return
	}

	for i := range b.Config.Resources.Jobs {

@ -39,13 +135,22 @@ func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) error {
			UserName: runAs.UserName,
		}
	}
}

// Legacy behavior of run_as for DLT pipelines. Available under the experimental.use_legacy_run_as flag.
// Only available to unblock customers stuck due to breaking changes in https://github.com/databricks/cli/pull/1233
func setPipelineOwnersToRunAsIdentity(b *bundle.Bundle) {
	runAs := b.Config.RunAs
	if runAs == nil {
		return
	}

	me := b.Config.Workspace.CurrentUser.UserName
	// If the user deploying the bundle and the one defined in run_as are the same,
	// do not add the IS_OWNER permission; the current user is implied to be an owner in this case.
	// Otherwise, it will fail due to this bug https://github.com/databricks/terraform-provider-databricks/issues/2407
	if runAs.UserName == me || runAs.ServicePrincipalName == me {
		return
	}

	for i := range b.Config.Resources.Pipelines {

@ -60,6 +165,32 @@ func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) error {
			UserName: runAs.UserName,
		})
	}
}

func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
	// Mutator is a no-op if run_as is not specified in the bundle.
	if b.Config.Value().Get("run_as").Kind() == dyn.KindInvalid {
		return nil
	}

	if b.Config.Experimental != nil && b.Config.Experimental.UseLegacyRunAs {
		setPipelineOwnersToRunAsIdentity(b)
		setRunAsForJobs(b)
		return diag.Diagnostics{
			{
				Severity: diag.Warning,
				Summary:  "You are using the legacy mode of run_as. Support for this mode is experimental and might be removed in a future release of the CLI. In order to run the DLT pipelines in your DAB as the run_as user, this mode changes the owners of the pipelines to the run_as identity, which requires the user deploying the bundle to be a workspace admin, and also a Metastore admin if the pipeline target is in UC.",
				Path:     dyn.MustPathFromString("experimental.use_legacy_run_as"),
				Location: b.Config.GetLocation("experimental.use_legacy_run_as"),
			},
		}
	}

	// Assert the run_as configuration is valid in the context of the bundle.
	if err := validateRunAs(b); err != nil {
		return diag.FromErr(err)
	}

	setRunAsForJobs(b)
	return nil
}
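validateRunAs above reduces to a small decision table. The following standalone sketch is a hedged, illustrative reduction of that logic; the function name and signature are hypothetical and not part of this diff:

// runAsDecision summarizes validateRunAs: given the configured identities,
// the deploying user, and whether any unsupported resource types are present,
// it reports the outcome of validation.
func runAsDecision(spName, userName, currentUser string, hasUnsupported bool) string {
	switch {
	case spName == "" && userName == "":
		return "error: run_as must specify exactly one identity"
	case spName != "" && userName != "":
		return "error: run_as must not specify both a service principal and a user"
	}

	identity := spName
	if identity == "" {
		identity = userName
	}

	// Same identity as the deployer: every resource type is allowed.
	if identity == currentUser {
		return "ok: all resource types supported"
	}

	// Different identity: pipelines, model serving endpoints and quality
	// monitors are rejected because the API has no run_as for them.
	if hasUnsupported {
		return "error: unsupported resource type for run_as"
	}
	return "ok: allow-listed resources only"
}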
@ -0,0 +1,194 @@
package mutator

import (
	"context"
	"slices"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/convert"
	"github.com/databricks/databricks-sdk-go/service/iam"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func allResourceTypes(t *testing.T) []string {
	// Compute supported resource types based on the `Resources{}` struct.
	r := &config.Resources{}
	rv, err := convert.FromTyped(r, dyn.NilValue)
	require.NoError(t, err)
	normalized, _ := convert.Normalize(r, rv, convert.IncludeMissingFields)
	resourceTypes := []string{}
	for _, k := range normalized.MustMap().Keys() {
		resourceTypes = append(resourceTypes, k.MustString())
	}
	slices.Sort(resourceTypes)

	// Assert the total list of supported resources, as a sanity check that using
	// the dyn library gives us the correct list of all supported resources.
	// Please also update this check when adding a new resource.
	require.Equal(t, []string{
		"experiments",
		"jobs",
		"model_serving_endpoints",
		"models",
		"pipelines",
		"quality_monitors",
		"registered_models",
	}, resourceTypes)

	return resourceTypes
}

func TestRunAsWorksForAllowedResources(t *testing.T) {
	config := config.Root{
		Workspace: config.Workspace{
			CurrentUser: &config.User{
				User: &iam.User{
					UserName: "alice",
				},
			},
		},
		RunAs: &jobs.JobRunAs{
			UserName: "bob",
		},
		Resources: config.Resources{
			Jobs: map[string]*resources.Job{
				"job_one": {
					JobSettings: &jobs.JobSettings{
						Name: "foo",
					},
				},
				"job_two": {
					JobSettings: &jobs.JobSettings{
						Name: "bar",
					},
				},
				"job_three": {
					JobSettings: &jobs.JobSettings{
						Name: "baz",
					},
				},
			},
			Models: map[string]*resources.MlflowModel{
				"model_one": {},
			},
			RegisteredModels: map[string]*resources.RegisteredModel{
				"registered_model_one": {},
			},
			Experiments: map[string]*resources.MlflowExperiment{
				"experiment_one": {},
			},
		},
	}

	b := &bundle.Bundle{
		Config: config,
	}

	diags := bundle.Apply(context.Background(), b, SetRunAs())
	assert.NoError(t, diags.Error())

	for _, job := range b.Config.Resources.Jobs {
		assert.Equal(t, "bob", job.RunAs.UserName)
	}
}

func TestRunAsErrorForUnsupportedResources(t *testing.T) {
	// Bundle "run_as" has two modes of operation, each with a different set of
	// resources that are supported.
	// Cases:
	//   1. When the bundle "run_as" identity is the same as the current deployment
	//      identity. In this case all resources are supported.
	//   2. When the bundle "run_as" identity is different from the current
	//      deployment identity. In this case only a subset of resources is
	//      supported. This subset is defined in the allow list below.
	//
	// To be a part of the allow list, the resource must satisfy one of the following
	// two conditions:
	//   1. The resource supports setting a run_as identity to a different user
	//      from the owner/creator of the resource. For example, jobs.
	//   2. Run as semantics do not apply to the resource. We do not plan to add
	//      platform-side support for `run_as` for these resources. For example,
	//      experiments or registered models.
	//
	// Any resource that is not on the allow list cannot be used when the bundle
	// run_as is different from the current deployment user. "bundle validate" must
	// return an error if such a resource has been defined, and the run_as identity
	// is different from the current deployment identity.
	//
	// Action Item: If you are adding a new resource to DABs, please check in with
	// the relevant owning team whether the resource should be on the allow list or
	// (implicitly) on the deny list. Any resources that could have run_as semantics
	// in the future should be on the deny list.
	// For example: the teams for pipelines, model serving endpoints and Lakeview
	// dashboards are planning to add platform-side support for `run_as` for these
	// resources at some point in the future. These resources are (implicitly) on
	// the deny list, since they are not on the allow list below.
	allowList := []string{
		"jobs",
		"models",
		"registered_models",
		"experiments",
	}

	base := config.Root{
		Workspace: config.Workspace{
			CurrentUser: &config.User{
				User: &iam.User{
					UserName: "alice",
				},
			},
		},
		RunAs: &jobs.JobRunAs{
			UserName: "bob",
		},
	}

	v, err := convert.FromTyped(base, dyn.NilValue)
	require.NoError(t, err)

	// Define the top-level resources key in the bundle configuration.
	// This is not part of the typed configuration, so we need to add it manually.
	v, err = dyn.Set(v, "resources", dyn.V(map[string]dyn.Value{}))
	require.NoError(t, err)

	for _, rt := range allResourceTypes(t) {
		// Skip allowed resources.
		if slices.Contains(allowList, rt) {
			continue
		}

		// Add an instance of the resource type that is not on the allow list to
		// the bundle configuration.
		nv, err := dyn.SetByPath(v, dyn.NewPath(dyn.Key("resources"), dyn.Key(rt)), dyn.V(map[string]dyn.Value{
			"foo": dyn.V(map[string]dyn.Value{
				"path": dyn.V("bar"),
			}),
		}))
		require.NoError(t, err)

		// Get back the typed configuration from the newly created invalid bundle configuration.
		r := &config.Root{}
		err = convert.ToTyped(r, nv)
		require.NoError(t, err)

		// Assert that this invalid bundle configuration fails validation.
		b := &bundle.Bundle{
			Config: *r,
		}
		diags := bundle.Apply(context.Background(), b, SetRunAs())
		assert.Equal(t, diags.Error().Error(), errUnsupportedResourceTypeForRunAs{
			resourceType:     rt,
			resourceLocation: dyn.Location{},
			currentUser:      "alice",
			runAsUser:        "bob",
		}.Error(), "expected run_as with a different identity than the current deployment user to not be supported for resources of type: %s", rt)
	}
}
@ -2,10 +2,10 @@ package mutator
import (
	"context"
	"strings"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
	"golang.org/x/exp/maps"
)

@ -20,9 +20,9 @@ func (m *selectDefaultTarget) Name() string {
	return "SelectDefaultTarget"
}

func (m *selectDefaultTarget) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	if len(b.Config.Targets) == 0 {
		return diag.Errorf("no targets defined")
	}

	// One target means there's only one default.

@ -41,12 +41,12 @@ func (m *selectDefaultTarget) Apply(ctx context.Context, b *bundle.Bundle) error
	// It is invalid to have multiple targets with the `default` flag set.
	if len(defaults) > 1 {
		return diag.Errorf("multiple targets are marked as default (%s)", strings.Join(defaults, ", "))
	}

	// If no target has the `default` flag set, ask the user to specify one.
	if len(defaults) == 0 {
		return diag.Errorf("please specify target")
	}

	// One default remaining.
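A recurring pattern in this diff is mutators returning diag.Diagnostics instead of error. Below is a hedged sketch of how a caller might consume that return value; the helper is illustrative, and the SelectDefaultTarget constructor name is assumed from the struct above rather than shown in the diff:

package main

import (
	"context"
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
)

// applyAndReport shows the error-to-diagnostics pattern: warnings are
// printed as they are encountered, and only a hard error is returned
// to abort the operation.
func applyAndReport(ctx context.Context, b *bundle.Bundle, m bundle.Mutator) error {
	diags := bundle.Apply(ctx, b, m)
	for _, d := range diags {
		if d.Severity == diag.Warning {
			fmt.Printf("warning: %s\n", d.Summary)
		}
	}
	return diags.Error()
}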