Merge remote-tracking branch 'databricks/main' into presets-catalog-schema-as-params

Lennart Kats 2024-12-01 21:04:11 +01:00
commit 3ca2e674be
No known key found for this signature in database
GPG Key ID: 1EB8B57673197023
195 changed files with 6172 additions and 1989 deletions

View File

@ -5,8 +5,7 @@
},
"batch": {
".codegen/cmds-workspace.go.tmpl": "cmd/workspace/cmd.go",
".codegen/cmds-account.go.tmpl": "cmd/account/cmd.go",
".codegen/lookup.go.tmpl": "bundle/config/variable/lookup.go"
".codegen/cmds-account.go.tmpl": "cmd/account/cmd.go"
},
"toolchain": {
"required": ["go"],

View File

@ -1 +1 @@
cf9c61453990df0f9453670f2fe68e1b128647a2
f2385add116e3716c8a90a0b68e204deb40f996c

View File

@ -1,134 +0,0 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
package variable
{{ $allowlist :=
list
"alerts"
"clusters"
"cluster-policies"
"clusters"
"dashboards"
"instance-pools"
"jobs"
"metastores"
"pipelines"
"service-principals"
"queries"
"warehouses"
}}
{{ $customField :=
dict
"service-principals" "ApplicationId"
}}
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
type Lookup struct {
{{range .Services -}}
{{- if in $allowlist .KebabName -}}
{{.Singular.PascalName}} string `json:"{{.Singular.SnakeName}},omitempty"`
{{end}}
{{- end}}
}
func LookupFromMap(m map[string]any) *Lookup {
l := &Lookup{}
{{range .Services -}}
{{- if in $allowlist .KebabName -}}
if v, ok := m["{{.Singular.SnakeName}}"]; ok {
l.{{.Singular.PascalName}} = v.(string)
}
{{end -}}
{{- end}}
return l
}
func (l *Lookup) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
if err := l.validate(); err != nil {
return "", err
}
r := allResolvers()
{{range .Services -}}
{{- if in $allowlist .KebabName -}}
if l.{{.Singular.PascalName}} != "" {
return r.{{.Singular.PascalName}}(ctx, w, l.{{.Singular.PascalName}})
}
{{end -}}
{{- end}}
return "", fmt.Errorf("no valid lookup fields provided")
}
func (l *Lookup) String() string {
{{range .Services -}}
{{- if in $allowlist .KebabName -}}
if l.{{.Singular.PascalName}} != "" {
return fmt.Sprintf("{{.Singular.KebabName}}: %s", l.{{.Singular.PascalName}})
}
{{end -}}
{{- end}}
return ""
}
func (l *Lookup) validate() error {
// Validate that only one field is set
count := 0
{{range .Services -}}
{{- if in $allowlist .KebabName -}}
if l.{{.Singular.PascalName}} != "" {
count++
}
{{end -}}
{{- end}}
if count != 1 {
return fmt.Errorf("exactly one lookup field must be provided")
}
if strings.Contains(l.String(), "${var") {
return fmt.Errorf("lookup fields cannot contain variable references")
}
return nil
}
type resolverFunc func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error)
type resolvers struct {
{{range .Services -}}
{{- if in $allowlist .KebabName -}}
{{.Singular.PascalName}} resolverFunc
{{end -}}
{{- end}}
}
func allResolvers() *resolvers {
r := &resolvers{}
{{range .Services -}}
{{- if in $allowlist .KebabName -}}
r.{{.Singular.PascalName}} = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["{{.Singular.PascalName}}"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.{{.PascalName}}.GetBy{{range .NamedIdMap.NamePath}}{{.PascalName}}{{end}}(ctx, name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.{{ getOrDefault $customField .KebabName ((index .NamedIdMap.IdPath 0).PascalName) }}), nil
}
{{end -}}
{{- end}}
return r
}

View File

@ -115,6 +115,9 @@ func new{{.PascalName}}() *cobra.Command {
{{- if .Request}}
var {{.CamelName}}Req {{.Service.Package.Name}}.{{.Request.PascalName}}
{{- if .RequestBodyField }}
{{.CamelName}}Req.{{.RequestBodyField.PascalName}} = &{{.Service.Package.Name}}.{{.RequestBodyField.Entity.PascalName}}{}
{{- end }}
{{- if .CanUseJson}}
var {{.CamelName}}Json flags.JsonFlag
{{- end}}
@ -127,21 +130,27 @@ func new{{.PascalName}}() *cobra.Command {
cmd.Flags().BoolVar(&{{.CamelName}}SkipWait, "no-wait", {{.CamelName}}SkipWait, `do not wait to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`)
cmd.Flags().DurationVar(&{{.CamelName}}Timeout, "timeout", {{.Wait.Timeout}}*time.Minute, `maximum amount of time to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`)
{{end -}}
{{if .Request}}// TODO: short flags
{{- $request := .Request -}}
{{- if .RequestBodyField -}}
{{- $request = .RequestBodyField.Entity -}}
{{- end -}}
{{if $request }}// TODO: short flags
{{- if .CanUseJson}}
cmd.Flags().Var(&{{.CamelName}}Json, "json", `either inline JSON string or @path/to/file.json with request body`)
{{- end}}
{{$method := .}}
{{ if not .IsJsonOnly }}
{{range .Request.Fields -}}
{{range $request.Fields -}}
{{- if not .Required -}}
{{if .Entity.IsObject }}// TODO: complex arg: {{.Name}}
{{else if .Entity.IsAny }}// TODO: any: {{.Name}}
{{else if .Entity.ArrayValue }}// TODO: array: {{.Name}}
{{else if .Entity.MapValue }}// TODO: map via StringToStringVar: {{.Name}}
{{else if .Entity.IsEmpty }}// TODO: output-only field
{{else if .Entity.Enum }}cmd.Flags().Var(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", `{{.Summary | without "`" | trimSuffix "."}}. Supported values: {{template "printArray" .Entity.Enum}}`)
{{else}}cmd.Flags().{{template "arg-type" .Entity}}(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", {{$method.CamelName}}Req.{{.PascalName}}, `{{.Summary | without "`"}}`)
{{else if .Entity.IsComputed -}}
{{else if .IsOutputOnly -}}
{{else if .Entity.Enum }}cmd.Flags().Var(&{{- template "request-body-obj" (dict "Method" $method "Field" .)}}, "{{.KebabName}}", `{{.Summary | without "`" | trimSuffix "."}}. Supported values: {{template "printArray" .Entity.Enum}}`)
{{else}}cmd.Flags().{{template "arg-type" .Entity}}(&{{- template "request-body-obj" (dict "Method" $method "Field" .)}}, "{{.KebabName}}", {{- template "request-body-obj" (dict "Method" $method "Field" .)}}, `{{.Summary | without "`"}}`)
{{end}}
{{- end -}}
{{- end}}
@ -161,14 +170,14 @@ func new{{.PascalName}}() *cobra.Command {
{{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}
{{- $hasPosArgs := .HasRequiredPositionalArguments -}}
{{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len .Request.RequiredFields)) -}}
{{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len $request.RequiredFields)) -}}
{{- $serviceHasNamedIdMap := and (and .Service.List .Service.List.NamedIdMap) (not (eq .PascalName "List")) -}}
{{- $hasIdPrompt := and (not $noPrompt) (and $hasSinglePosArg $serviceHasNamedIdMap) -}}
{{- $wait := and .Wait (and (not .IsCrudRead) (not (eq .SnakeName "get_run"))) -}}
{{- $hasRequiredArgs := and (not $hasIdPrompt) $hasPosArgs -}}
{{- $hasSingleRequiredRequestBodyFieldWithPrompt := and (and $hasIdPrompt .Request) (eq 1 (len .Request.RequiredRequestBodyFields)) -}}
{{- $onlyPathArgsRequiredAsPositionalArguments := and .Request (eq (len .RequiredPositionalArguments) (len .Request.RequiredPathFields)) -}}
{{- $hasDifferentArgsWithJsonFlag := and (not $onlyPathArgsRequiredAsPositionalArguments) (and .CanUseJson .Request.HasRequiredRequestBodyFields) -}}
{{- $hasSingleRequiredRequestBodyFieldWithPrompt := and (and $hasIdPrompt $request) (eq 1 (len $request.RequiredRequestBodyFields)) -}}
{{- $onlyPathArgsRequiredAsPositionalArguments := and $request (eq (len .RequiredPositionalArguments) (len $request.RequiredPathFields)) -}}
{{- $hasDifferentArgsWithJsonFlag := and (not $onlyPathArgsRequiredAsPositionalArguments) (and .CanUseJson (or $request.HasRequiredRequestBodyFields )) -}}
{{- $hasCustomArgHandler := or $hasRequiredArgs $hasDifferentArgsWithJsonFlag -}}
{{- $atleastOneArgumentWithDescription := false -}}
@ -206,12 +215,12 @@ func new{{.PascalName}}() *cobra.Command {
cmd.Args = func(cmd *cobra.Command, args []string) error {
{{- if $hasDifferentArgsWithJsonFlag }}
if cmd.Flags().Changed("json") {
err := root.ExactArgs({{len .Request.RequiredPathFields}})(cmd, args)
err := root.ExactArgs({{len $request.RequiredPathFields}})(cmd, args)
if err != nil {
{{- if eq 0 (len .Request.RequiredPathFields) }}
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide{{- range $index, $field := .Request.RequiredFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input")
{{- if eq 0 (len $request.RequiredPathFields) }}
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide{{- range $index, $field := $request.RequiredFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input")
{{- else }}
return fmt.Errorf("when --json flag is specified, provide only{{- range $index, $field := .Request.RequiredPathFields}}{{if $index}},{{end}} {{$field.ConstantName}}{{end}} as positional arguments. Provide{{- range $index, $field := .Request.RequiredRequestBodyFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input")
return fmt.Errorf("when --json flag is specified, provide only{{- range $index, $field := $request.RequiredPathFields}}{{if $index}},{{end}} {{$field.ConstantName}}{{end}} as positional arguments. Provide{{- range $index, $field := $request.RequiredRequestBodyFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input")
{{- end }}
}
return nil
@ -232,7 +241,7 @@ func new{{.PascalName}}() *cobra.Command {
{{- if .Request }}
{{ if .CanUseJson }}
if cmd.Flags().Changed("json") {
diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req)
diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req{{ if .RequestBodyField }}.{{.RequestBodyField.PascalName}}{{ end }})
if diags.HasError() {
return diags.Error()
}
@ -251,20 +260,20 @@ func new{{.PascalName}}() *cobra.Command {
{{- if $hasIdPrompt}}
if len(args) == 0 {
promptSpinner := cmdio.Spinner(ctx)
promptSpinner <- "No{{range .Request.RequiredFields}} {{.ConstantName}}{{end}} argument specified. Loading names for {{.Service.TitleName}} drop-down."
promptSpinner <- "No{{range $request.RequiredFields}} {{.ConstantName}}{{end}} argument specified. Loading names for {{.Service.TitleName}} drop-down."
names, err := {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.Service.List.NamedIdMap.PascalName}}(ctx{{if .Service.List.Request}}, {{.Service.Package.Name}}.{{.Service.List.Request.PascalName}}{}{{end}})
close(promptSpinner)
if err != nil {
return fmt.Errorf("failed to load names for {{.Service.TitleName}} drop-down. Please manually specify required arguments. Original error: %w", err)
}
id, err := cmdio.Select(ctx, names, "{{range .Request.RequiredFields}}{{.Summary | trimSuffix "."}}{{end}}")
id, err := cmdio.Select(ctx, names, "{{range $request.RequiredFields}}{{.Summary | trimSuffix "."}}{{end}}")
if err != nil {
return err
}
args = append(args, id)
}
if len(args) != 1 {
return fmt.Errorf("expected to have {{range .Request.RequiredFields}}{{.Summary | trimSuffix "." | lower}}{{end}}")
return fmt.Errorf("expected to have {{range $request.RequiredFields}}{{.Summary | trimSuffix "." | lower}}{{end}}")
}
{{- end -}}
@ -388,13 +397,19 @@ func new{{.PascalName}}() *cobra.Command {
if !cmd.Flags().Changed("json") {
{{- end }}
{{if not $field.Entity.IsString -}}
_, err = fmt.Sscan(args[{{$arg}}], &{{$method.CamelName}}Req.{{$field.PascalName}})
_, err = fmt.Sscan(args[{{$arg}}], &{{- template "request-body-obj" (dict "Method" $method "Field" $field)}})
if err != nil {
return fmt.Errorf("invalid {{$field.ConstantName}}: %s", args[{{$arg}}])
}{{else -}}
{{$method.CamelName}}Req.{{$field.PascalName}} = args[{{$arg}}]
{{- template "request-body-obj" (dict "Method" $method "Field" $field)}} = args[{{$arg}}]
{{- end -}}
{{- if $optionalIfJsonIsUsed }}
}
{{- end }}
{{- end -}}
{{- define "request-body-obj" -}}
{{- $method := .Method -}}
{{- $field := .Field -}}
{{$method.CamelName}}Req{{ if (and $method.RequestBodyField (not $field.IsPath)) }}.{{$method.RequestBodyField.PascalName}}{{end}}.{{$field.PascalName}}
{{- end -}}

.gitattributes
View File

@ -1,4 +1,3 @@
bundle/config/variable/lookup.go linguist-generated=true
cmd/account/access-control/access-control.go linguist-generated=true
cmd/account/billable-usage/billable-usage.go linguist-generated=true
cmd/account/budgets/budgets.go linguist-generated=true
@ -30,13 +29,14 @@ cmd/account/users/users.go linguist-generated=true
cmd/account/vpc-endpoints/vpc-endpoints.go linguist-generated=true
cmd/account/workspace-assignment/workspace-assignment.go linguist-generated=true
cmd/account/workspaces/workspaces.go linguist-generated=true
cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go linguist-generated=true
cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go linguist-generated=true
cmd/workspace/alerts-legacy/alerts-legacy.go linguist-generated=true
cmd/workspace/alerts/alerts.go linguist-generated=true
cmd/workspace/apps/apps.go linguist-generated=true
cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true
cmd/workspace/automatic-cluster-update/automatic-cluster-update.go linguist-generated=true
cmd/workspace/catalogs/catalogs.go linguist-generated=true
cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true
cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true
cmd/workspace/clusters/clusters.go linguist-generated=true
cmd/workspace/cmd.go linguist-generated=true
@ -48,6 +48,7 @@ cmd/workspace/consumer-listings/consumer-listings.go linguist-generated=true
cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go linguist-generated=true
cmd/workspace/consumer-providers/consumer-providers.go linguist-generated=true
cmd/workspace/credentials-manager/credentials-manager.go linguist-generated=true
cmd/workspace/credentials/credentials.go linguist-generated=true
cmd/workspace/current-user/current-user.go linguist-generated=true
cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true
cmd/workspace/dashboards/dashboards.go linguist-generated=true

View File

@ -1,5 +1,68 @@
# Version changelog

## [Release] Release v0.235.0

**Note:** the `bundle generate` command now uses the `.<resource-type>.yml`
sub-extension for the configuration files it writes. Existing configuration
files that do not use this sub-extension are renamed to include it.
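For example (with a hypothetical resource name): a job configuration that `bundle generate` previously wrote as `my_job.yml` is now written as `my_job.job.yml`, and an existing `my_job.yml` is renamed to match.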

Bundles:
* Make `TableName` field part of quality monitor schema ([#1903](https://github.com/databricks/cli/pull/1903)).
* Do not prepend paths starting with ~ or variable reference ([#1905](https://github.com/databricks/cli/pull/1905)).
* Fix workspace extensions filer accidentally reading notebooks ([#1891](https://github.com/databricks/cli/pull/1891)).
* Fix template initialization when running on Databricks ([#1912](https://github.com/databricks/cli/pull/1912)).
* Source-linked deployments for bundles in the workspace ([#1884](https://github.com/databricks/cli/pull/1884)).
* Added integration test to deploy bundle to /Shared root path ([#1914](https://github.com/databricks/cli/pull/1914)).
* Update filenames used by bundle generate to use `.<resource-type>.yml` ([#1901](https://github.com/databricks/cli/pull/1901)).

Internal:
* Extract functionality to detect if the CLI is running on DBR ([#1889](https://github.com/databricks/cli/pull/1889)).
* Consolidate test helpers for `io/fs` ([#1906](https://github.com/databricks/cli/pull/1906)).
* Use `fs.FS` interface to read template ([#1910](https://github.com/databricks/cli/pull/1910)).
* Use `filer.Filer` to write template instantiation ([#1911](https://github.com/databricks/cli/pull/1911)).

## [Release] Release v0.234.0

Bundles:
* Do not execute build on bundle destroy ([#1882](https://github.com/databricks/cli/pull/1882)).
* Add support for non-Python ipynb notebooks to DABs ([#1827](https://github.com/databricks/cli/pull/1827)).

API Changes:
* Added `databricks credentials` command group.
* Changed `databricks lakeview create` command with new required argument order.

OpenAPI commit d25296d2f4aa7bd6195c816fdf82e0f960f775da (2024-11-07)

Dependency updates:
* Upgrade TF provider to 1.58.0 ([#1900](https://github.com/databricks/cli/pull/1900)).
* Bump golang.org/x/sync from 0.8.0 to 0.9.0 ([#1892](https://github.com/databricks/cli/pull/1892)).
* Bump golang.org/x/text from 0.19.0 to 0.20.0 ([#1893](https://github.com/databricks/cli/pull/1893)).
* Bump golang.org/x/mod from 0.21.0 to 0.22.0 ([#1895](https://github.com/databricks/cli/pull/1895)).
* Bump golang.org/x/oauth2 from 0.23.0 to 0.24.0 ([#1894](https://github.com/databricks/cli/pull/1894)).
* Bump github.com/databricks/databricks-sdk-go from 0.49.0 to 0.51.0 ([#1878](https://github.com/databricks/cli/pull/1878)).

## [Release] Release v0.233.0

CLI:
* Clean host URL in the `auth login` command ([#1879](https://github.com/databricks/cli/pull/1879)).

Bundles:
* Fix bundle run when run interactively ([#1880](https://github.com/databricks/cli/pull/1880)).
* Fix relative path resolution for dashboards on Windows ([#1881](https://github.com/databricks/cli/pull/1881)).

Internal:
* Address goreleaser deprecation warning ([#1872](https://github.com/databricks/cli/pull/1872)).
* Update actions/github-script to v7 ([#1873](https://github.com/databricks/cli/pull/1873)).
* Use Go 1.23 ([#1871](https://github.com/databricks/cli/pull/1871)).
* [Internal] Always write message for manual integration test trigger ([#1874](https://github.com/databricks/cli/pull/1874)).
* Add `cmd-exec-id` to user agent ([#1808](https://github.com/databricks/cli/pull/1808)).
* Added E2E test to run Python wheels on interactive cluster created in bundle ([#1864](https://github.com/databricks/cli/pull/1864)).

Dependency updates:
* Bump github.com/hashicorp/terraform-json from 0.22.1 to 0.23.0 ([#1877](https://github.com/databricks/cli/pull/1877)).

## [Release] Release v0.232.1

This patch release fixes the following error observed when deploying to /Shared root folder

View File

@ -17,7 +17,6 @@ import (
"github.com/databricks/cli/bundle/env"
"github.com/databricks/cli/bundle/metadata"
"github.com/databricks/cli/libs/fileset"
"github.com/databricks/cli/libs/git"
"github.com/databricks/cli/libs/locker"
"github.com/databricks/cli/libs/log"
"github.com/databricks/cli/libs/tags"
@ -223,15 +222,6 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) {
return append(b.Config.Sync.Include, filepath.ToSlash(filepath.Join(internalDirRel, "*.*"))), nil
}
func (b *Bundle) GitRepository() (*git.Repository, error) {
_, err := vfs.FindLeafInTree(b.BundleRoot, ".git")
if err != nil {
return nil, fmt.Errorf("unable to locate repository root: %w", err)
}
return git.NewRepository(b.BundleRoot)
}
// AuthEnv returns a map with environment variables and their values
// derived from the workspace client configuration that was resolved
// in the context of this bundle.

View File

@ -11,6 +11,7 @@ import (
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/libs/dbr"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/cli/libs/textutil"
@ -278,13 +279,34 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
// Dashboards: Prefix
for key, dashboard := range r.Dashboards {
if dashboard == nil || dashboard.CreateDashboardRequest == nil {
if dashboard == nil || dashboard.Dashboard == nil {
diags = diags.Extend(diag.Errorf("dashboard %s is not defined", key))
continue
}
dashboard.DisplayName = prefix + dashboard.DisplayName
}
if config.IsExplicitlyEnabled((b.Config.Presets.SourceLinkedDeployment)) {
isDatabricksWorkspace := dbr.RunsOnRuntime(ctx) && strings.HasPrefix(b.SyncRootPath, "/Workspace/")
if !isDatabricksWorkspace {
target := b.Config.Bundle.Target
path := dyn.NewPath(dyn.Key("targets"), dyn.Key(target), dyn.Key("presets"), dyn.Key("source_linked_deployment"))
diags = diags.Append(
diag.Diagnostic{
Severity: diag.Warning,
Summary: "source-linked deployment is available only in the Databricks Workspace",
Paths: []dyn.Path{
path,
},
Locations: b.Config.GetLocations(path[2:].String()),
},
)
disabled := false
b.Config.Presets.SourceLinkedDeployment = &disabled
}
}
return diags
}

View File

@ -2,12 +2,16 @@ package mutator_test
import (
"context"
"runtime"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/bundle/internal/bundletest"
"github.com/databricks/cli/libs/dbr"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/require"
@ -364,3 +368,88 @@ func TestApplyPresetsResourceNotDefined(t *testing.T) {
})
}
}
func TestApplyPresetsSourceLinkedDeployment(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("this test is not applicable on Windows because source-linked mode works only in the Databricks Workspace")
}
testContext := context.Background()
enabled := true
disabled := false
workspacePath := "/Workspace/user.name@company.com"
tests := []struct {
bundlePath string
ctx context.Context
name string
initialValue *bool
expectedValue *bool
expectedWarning string
}{
{
name: "preset enabled, bundle in Workspace, databricks runtime",
bundlePath: workspacePath,
ctx: dbr.MockRuntime(testContext, true),
initialValue: &enabled,
expectedValue: &enabled,
},
{
name: "preset enabled, bundle not in Workspace, databricks runtime",
bundlePath: "/Users/user.name@company.com",
ctx: dbr.MockRuntime(testContext, true),
initialValue: &enabled,
expectedValue: &disabled,
expectedWarning: "source-linked deployment is available only in the Databricks Workspace",
},
{
name: "preset enabled, bundle in Workspace, not databricks runtime",
bundlePath: workspacePath,
ctx: dbr.MockRuntime(testContext, false),
initialValue: &enabled,
expectedValue: &disabled,
expectedWarning: "source-linked deployment is available only in the Databricks Workspace",
},
{
name: "preset disabled, bundle in Workspace, databricks runtime",
bundlePath: workspacePath,
ctx: dbr.MockRuntime(testContext, true),
initialValue: &disabled,
expectedValue: &disabled,
},
{
name: "preset nil, bundle in Workspace, databricks runtime",
bundlePath: workspacePath,
ctx: dbr.MockRuntime(testContext, true),
initialValue: nil,
expectedValue: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
b := &bundle.Bundle{
SyncRootPath: tt.bundlePath,
Config: config.Root{
Presets: config.Presets{
SourceLinkedDeployment: tt.initialValue,
},
},
}
bundletest.SetLocation(b, "presets.source_linked_deployment", []dyn.Location{{File: "databricks.yml"}})
diags := bundle.Apply(tt.ctx, b, mutator.ApplyPresets())
if diags.HasError() {
t.Fatalf("unexpected error: %v", diags)
}
if tt.expectedWarning != "" {
require.Equal(t, tt.expectedWarning, diags[0].Summary)
require.NotEmpty(t, diags[0].Locations)
}
require.Equal(t, tt.expectedValue, b.Config.Presets.SourceLinkedDeployment)
})
}
}

View File

@ -26,13 +26,13 @@ func TestConfigureDashboardDefaultsParentPath(t *testing.T) {
"d1": {
// Empty string is skipped.
// See below for how it is set.
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
ParentPath: "",
},
},
"d2": {
// Non-empty string is skipped.
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
ParentPath: "already-set",
},
},

View File

@ -5,14 +5,12 @@ import (
"strings"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/dbr"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/env"
"github.com/databricks/cli/libs/filer"
"github.com/databricks/cli/libs/vfs"
)
const envDatabricksRuntimeVersion = "DATABRICKS_RUNTIME_VERSION"
type configureWSFS struct{}
func ConfigureWSFS() bundle.Mutator {
@ -32,7 +30,7 @@ func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
}
// The executable must be running on DBR.
if _, ok := env.Lookup(ctx, envDatabricksRuntimeVersion); !ok {
if !dbr.RunsOnRuntime(ctx) {
return nil
}

View File

@ -0,0 +1,65 @@
package mutator_test
import (
"context"
"runtime"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/databricks/cli/libs/dbr"
"github.com/databricks/cli/libs/vfs"
"github.com/databricks/databricks-sdk-go/config"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/stretchr/testify/assert"
)
func mockBundleForConfigureWSFS(t *testing.T, syncRootPath string) *bundle.Bundle {
// The native path of the sync root on Windows will never match the /Workspace prefix,
// so the test case for nominal behavior will always fail.
if runtime.GOOS == "windows" {
t.Skip("this test is not applicable on Windows")
}
b := &bundle.Bundle{
SyncRoot: vfs.MustNew(syncRootPath),
}
w := mocks.NewMockWorkspaceClient(t)
w.WorkspaceClient.Config = &config.Config{}
b.SetWorkpaceClient(w.WorkspaceClient)
return b
}
func TestConfigureWSFS_SkipsIfNotWorkspacePrefix(t *testing.T) {
b := mockBundleForConfigureWSFS(t, "/foo")
originalSyncRoot := b.SyncRoot
ctx := context.Background()
diags := bundle.Apply(ctx, b, mutator.ConfigureWSFS())
assert.Empty(t, diags)
assert.Equal(t, originalSyncRoot, b.SyncRoot)
}
func TestConfigureWSFS_SkipsIfNotRunningOnRuntime(t *testing.T) {
b := mockBundleForConfigureWSFS(t, "/Workspace/foo")
originalSyncRoot := b.SyncRoot
ctx := context.Background()
ctx = dbr.MockRuntime(ctx, false)
diags := bundle.Apply(ctx, b, mutator.ConfigureWSFS())
assert.Empty(t, diags)
assert.Equal(t, originalSyncRoot, b.SyncRoot)
}
func TestConfigureWSFS_SwapSyncRoot(t *testing.T) {
b := mockBundleForConfigureWSFS(t, "/Workspace/foo")
originalSyncRoot := b.SyncRoot
ctx := context.Background()
ctx = dbr.MockRuntime(ctx, true)
diags := bundle.Apply(ctx, b, mutator.ConfigureWSFS())
assert.Empty(t, diags)
assert.NotEqual(t, originalSyncRoot, b.SyncRoot)
}

View File

@ -65,9 +65,8 @@ func TestInitializeURLs(t *testing.T) {
},
QualityMonitors: map[string]*resources.QualityMonitor{
"qualityMonitor1": {
CreateMonitor: &catalog.CreateMonitor{
TableName: "catalog.schema.qualityMonitor1",
},
TableName: "catalog.schema.qualityMonitor1",
CreateMonitor: &catalog.CreateMonitor{},
},
},
Schemas: map[string]*resources.Schema{
@ -89,7 +88,7 @@ func TestInitializeURLs(t *testing.T) {
Dashboards: map[string]*resources.Dashboard{
"dashboard1": {
ID: "01ef8d56871e1d50ae30ce7375e42478",
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
DisplayName: "My special dashboard",
},
},

View File

@ -44,6 +44,11 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di
return dyn.InvalidValue, fmt.Errorf("expected string, got %s", v.Kind())
}
// Skip prefixing if the path does not start with /; it might be a variable reference or something else.
if !strings.HasPrefix(path, "/") {
return pv, nil
}
for _, prefix := range skipPrefixes {
if strings.HasPrefix(path, prefix) {
return pv, nil

View File

@ -31,6 +31,14 @@ func TestPrependWorkspacePrefix(t *testing.T) {
path: "/Volumes/Users/test",
expected: "/Volumes/Users/test",
},
{
path: "~/test",
expected: "~/test",
},
{
path: "${workspace.file_path}/test",
expected: "${workspace.file_path}/test",
},
}
for _, tc := range testCases {

View File

@ -6,6 +6,7 @@ import (
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/libs/dbr"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/cli/libs/iamutil"
@ -57,6 +58,14 @@ func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) {
t.TriggerPauseStatus = config.Paused
}
if !config.IsExplicitlyDisabled(t.SourceLinkedDeployment) {
isInWorkspace := strings.HasPrefix(b.SyncRootPath, "/Workspace/")
if isInWorkspace && dbr.RunsOnRuntime(ctx) {
enabled := true
t.SourceLinkedDeployment = &enabled
}
}
if !config.IsExplicitlyDisabled(t.PipelinesDevelopment) {
enabled := true
t.PipelinesDevelopment = &enabled

View File

@ -3,14 +3,17 @@ package mutator
import (
"context"
"reflect"
"runtime"
"strings"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/libs/dbr"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/tags"
"github.com/databricks/cli/libs/vfs"
sdkconfig "github.com/databricks/databricks-sdk-go/config"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/databricks/databricks-sdk-go/service/compute"
@ -102,16 +105,23 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
"registeredmodel1": {CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{Name: "registeredmodel1"}},
},
QualityMonitors: map[string]*resources.QualityMonitor{
"qualityMonitor1": {CreateMonitor: &catalog.CreateMonitor{TableName: "qualityMonitor1"}},
"qualityMonitor2": {
"qualityMonitor1": {
TableName: "qualityMonitor1",
CreateMonitor: &catalog.CreateMonitor{
TableName: "qualityMonitor2",
Schedule: &catalog.MonitorCronSchedule{},
OutputSchemaName: "catalog.schema",
},
},
"qualityMonitor2": {
TableName: "qualityMonitor2",
CreateMonitor: &catalog.CreateMonitor{
OutputSchemaName: "catalog.schema",
Schedule: &catalog.MonitorCronSchedule{},
},
},
"qualityMonitor3": {
TableName: "qualityMonitor3",
CreateMonitor: &catalog.CreateMonitor{
TableName: "qualityMonitor3",
OutputSchemaName: "catalog.schema",
Schedule: &catalog.MonitorCronSchedule{
PauseStatus: catalog.MonitorCronSchedulePauseStatusUnpaused,
},
@ -126,13 +136,14 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
},
Dashboards: map[string]*resources.Dashboard{
"dashboard1": {
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
DisplayName: "dashboard1",
},
},
},
},
},
SyncRoot: vfs.MustNew("/Users/lennart.kats@databricks.com"),
// Use AWS implementation for testing.
Tagging: tags.ForCloud(&sdkconfig.Config{
Host: "https://company.cloud.databricks.com",
@ -515,3 +526,32 @@ func TestPipelinesDevelopmentDisabled(t *testing.T) {
assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
}
func TestSourceLinkedDeploymentEnabled(t *testing.T) {
b, diags := processSourceLinkedBundle(t, true)
require.NoError(t, diags.Error())
assert.True(t, *b.Config.Presets.SourceLinkedDeployment)
}
func TestSourceLinkedDeploymentDisabled(t *testing.T) {
b, diags := processSourceLinkedBundle(t, false)
require.NoError(t, diags.Error())
assert.False(t, *b.Config.Presets.SourceLinkedDeployment)
}
func processSourceLinkedBundle(t *testing.T, presetEnabled bool) (*bundle.Bundle, diag.Diagnostics) {
if runtime.GOOS == "windows" {
t.Skip("this test is not applicable on Windows because source-linked mode works only in the Databricks Workspace")
}
b := mockBundle(config.Development)
workspacePath := "/Workspace/lennart@company.com/"
b.SyncRootPath = workspacePath
b.Config.Presets.SourceLinkedDeployment = &presetEnabled
ctx := dbr.MockRuntime(context.Background(), true)
m := bundle.Seq(ProcessTargetMode(), ApplyPresets())
diags := bundle.Apply(ctx, b, m)
return b, diags
}

View File

@ -11,6 +11,7 @@ import (
"strings"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/cli/libs/notebook"
@ -103,8 +104,13 @@ func (t *translateContext) rewritePath(
return fmt.Errorf("path %s is not contained in sync root path", localPath)
}
// Prefix remote path with its remote root path.
remotePath := path.Join(t.b.Config.Workspace.FilePath, filepath.ToSlash(localRelPath))
var workspacePath string
if config.IsExplicitlyEnabled(t.b.Config.Presets.SourceLinkedDeployment) {
workspacePath = t.b.SyncRootPath
} else {
workspacePath = t.b.Config.Workspace.FilePath
}
remotePath := path.Join(workspacePath, filepath.ToSlash(localRelPath))
// Convert local path into workspace path via specified function.
interp, err := fn(*p, localPath, localRelPath, remotePath)
@ -120,7 +126,33 @@ func (t *translateContext) rewritePath(
func (t *translateContext) translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
nb, _, err := notebook.DetectWithFS(t.b.SyncRoot, filepath.ToSlash(localRelPath))
if errors.Is(err, fs.ErrNotExist) {
return "", fmt.Errorf("notebook %s not found", literal)
if filepath.Ext(localFullPath) != notebook.ExtensionNone {
return "", fmt.Errorf("notebook %s not found", literal)
}
extensions := []string{
notebook.ExtensionPython,
notebook.ExtensionR,
notebook.ExtensionScala,
notebook.ExtensionSql,
notebook.ExtensionJupyter,
}
// Check whether a file with a notebook extension already exists. This
// way we can provide a more targeted error message.
for _, ext := range extensions {
literalWithExt := literal + ext
localRelPathWithExt := filepath.ToSlash(localRelPath + ext)
if _, err := fs.Stat(t.b.SyncRoot, localRelPathWithExt); err == nil {
return "", fmt.Errorf(`notebook %s not found. Did you mean %s?
Local notebook references are expected to contain one of the following
file extensions: [%s]`, literal, literalWithExt, strings.Join(extensions, ", "))
}
}
// Return a generic error message if no matching possible file is found.
return "", fmt.Errorf(`notebook %s not found. Local notebook references are expected
to contain one of the following file extensions: [%s]`, literal, strings.Join(extensions, ", "))
}
if err != nil {
return "", fmt.Errorf("unable to determine if %s is a notebook: %w", localFullPath, err)
@ -163,7 +195,7 @@ func (t *translateContext) translateNoOp(literal, localFullPath, localRelPath, r
}
func (t *translateContext) retainLocalAbsoluteFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
info, err := t.b.SyncRoot.Stat(localRelPath)
info, err := t.b.SyncRoot.Stat(filepath.ToSlash(localRelPath))
if errors.Is(err, fs.ErrNotExist) {
return "", fmt.Errorf("file %s not found", literal)
}

View File

@ -0,0 +1,54 @@
package mutator_test
import (
"context"
"path/filepath"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/bundle/internal/bundletest"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/cli/libs/vfs"
"github.com/databricks/databricks-sdk-go/service/dashboards"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestTranslatePathsDashboards_FilePathRelativeSubDirectory(t *testing.T) {
dir := t.TempDir()
touchEmptyFile(t, filepath.Join(dir, "src", "my_dashboard.lvdash.json"))
b := &bundle.Bundle{
SyncRootPath: dir,
SyncRoot: vfs.MustNew(dir),
Config: config.Root{
Resources: config.Resources{
Dashboards: map[string]*resources.Dashboard{
"dashboard": {
Dashboard: &dashboards.Dashboard{
DisplayName: "My Dashboard",
},
FilePath: "../src/my_dashboard.lvdash.json",
},
},
},
},
}
bundletest.SetLocation(b, "resources.dashboards", []dyn.Location{{
File: filepath.Join(dir, "resources/dashboard.yml"),
}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
require.NoError(t, diags.Error())
// Assert that the file path for the dashboard has been converted to its local absolute path.
assert.Equal(
t,
filepath.Join(dir, "src", "my_dashboard.lvdash.json"),
b.Config.Resources.Dashboards["dashboard"].FilePath,
)
}

View File

@ -2,8 +2,10 @@ package mutator_test
import (
"context"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
@ -507,6 +509,59 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) {
assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found")
}
func TestPipelineNotebookDoesNotExistErrorWithoutExtension(t *testing.T) {
for _, ext := range []string{
".py",
".r",
".scala",
".sql",
".ipynb",
"",
} {
t.Run("case_"+ext, func(t *testing.T) {
dir := t.TempDir()
if ext != "" {
touchEmptyFile(t, filepath.Join(dir, "foo"+ext))
}
b := &bundle.Bundle{
SyncRootPath: dir,
SyncRoot: vfs.MustNew(dir),
Config: config.Root{
Resources: config.Resources{
Pipelines: map[string]*resources.Pipeline{
"pipeline": {
PipelineSpec: &pipelines.PipelineSpec{
Libraries: []pipelines.PipelineLibrary{
{
Notebook: &pipelines.NotebookLibrary{
Path: "./foo",
},
},
},
},
},
},
},
},
}
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
if ext == "" {
assert.EqualError(t, diags.Error(), `notebook ./foo not found. Local notebook references are expected
to contain one of the following file extensions: [.py, .r, .scala, .sql, .ipynb]`)
} else {
assert.EqualError(t, diags.Error(), fmt.Sprintf(`notebook ./foo not found. Did you mean ./foo%s?
Local notebook references are expected to contain one of the following
file extensions: [.py, .r, .scala, .sql, .ipynb]`, ext))
}
})
}
}
func TestPipelineFileDoesNotExistError(t *testing.T) {
dir := t.TempDir()
@ -787,3 +842,163 @@ func TestTranslatePathWithComplexVariables(t *testing.T) {
b.Config.Resources.Jobs["job"].Tasks[0].Libraries[0].Whl,
)
}
func TestTranslatePathsWithSourceLinkedDeployment(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("this test is not applicable on Windows because source-linked mode works only in the Databricks Workspace")
}
dir := t.TempDir()
touchNotebookFile(t, filepath.Join(dir, "my_job_notebook.py"))
touchNotebookFile(t, filepath.Join(dir, "my_pipeline_notebook.py"))
touchEmptyFile(t, filepath.Join(dir, "my_python_file.py"))
touchEmptyFile(t, filepath.Join(dir, "dist", "task.jar"))
touchEmptyFile(t, filepath.Join(dir, "requirements.txt"))
enabled := true
b := &bundle.Bundle{
SyncRootPath: dir,
SyncRoot: vfs.MustNew(dir),
Config: config.Root{
Workspace: config.Workspace{
FilePath: "/bundle",
},
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job": {
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{
{
NotebookTask: &jobs.NotebookTask{
NotebookPath: "my_job_notebook.py",
},
Libraries: []compute.Library{
{Whl: "./dist/task.whl"},
},
},
{
NotebookTask: &jobs.NotebookTask{
NotebookPath: "/Users/jane.doe@databricks.com/absolute_remote.py",
},
},
{
NotebookTask: &jobs.NotebookTask{
NotebookPath: "my_job_notebook.py",
},
Libraries: []compute.Library{
{Requirements: "requirements.txt"},
},
},
{
SparkPythonTask: &jobs.SparkPythonTask{
PythonFile: "my_python_file.py",
},
},
{
SparkJarTask: &jobs.SparkJarTask{
MainClassName: "HelloWorld",
},
Libraries: []compute.Library{
{Jar: "./dist/task.jar"},
},
},
{
SparkJarTask: &jobs.SparkJarTask{
MainClassName: "HelloWorldRemote",
},
Libraries: []compute.Library{
{Jar: "dbfs:/bundle/dist/task_remote.jar"},
},
},
},
},
},
},
Pipelines: map[string]*resources.Pipeline{
"pipeline": {
PipelineSpec: &pipelines.PipelineSpec{
Libraries: []pipelines.PipelineLibrary{
{
Notebook: &pipelines.NotebookLibrary{
Path: "my_pipeline_notebook.py",
},
},
{
Notebook: &pipelines.NotebookLibrary{
Path: "/Users/jane.doe@databricks.com/absolute_remote.py",
},
},
{
File: &pipelines.FileLibrary{
Path: "my_python_file.py",
},
},
},
},
},
},
},
Presets: config.Presets{
SourceLinkedDeployment: &enabled,
},
},
}
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
require.NoError(t, diags.Error())
// updated to source path
assert.Equal(
t,
filepath.Join(dir, "my_job_notebook"),
b.Config.Resources.Jobs["job"].Tasks[0].NotebookTask.NotebookPath,
)
assert.Equal(
t,
filepath.Join(dir, "requirements.txt"),
b.Config.Resources.Jobs["job"].Tasks[2].Libraries[0].Requirements,
)
assert.Equal(
t,
filepath.Join(dir, "my_python_file.py"),
b.Config.Resources.Jobs["job"].Tasks[3].SparkPythonTask.PythonFile,
)
assert.Equal(
t,
filepath.Join(dir, "my_pipeline_notebook"),
b.Config.Resources.Pipelines["pipeline"].Libraries[0].Notebook.Path,
)
assert.Equal(
t,
filepath.Join(dir, "my_python_file.py"),
b.Config.Resources.Pipelines["pipeline"].Libraries[2].File.Path,
)
// left as is
assert.Equal(
t,
filepath.Join("dist", "task.whl"),
b.Config.Resources.Jobs["job"].Tasks[0].Libraries[0].Whl,
)
assert.Equal(
t,
"/Users/jane.doe@databricks.com/absolute_remote.py",
b.Config.Resources.Jobs["job"].Tasks[1].NotebookTask.NotebookPath,
)
assert.Equal(
t,
filepath.Join("dist", "task.jar"),
b.Config.Resources.Jobs["job"].Tasks[4].Libraries[0].Jar,
)
assert.Equal(
t,
"dbfs:/bundle/dist/task_remote.jar",
b.Config.Resources.Jobs["job"].Tasks[5].Libraries[0].Jar,
)
assert.Equal(
t,
"/Users/jane.doe@databricks.com/absolute_remote.py",
b.Config.Resources.Pipelines["pipeline"].Libraries[1].Notebook.Path,
)
}

View File

@ -17,6 +17,11 @@ type Presets struct {
// JobsMaxConcurrentRuns is the default value for the max concurrent runs of jobs.
JobsMaxConcurrentRuns int `json:"jobs_max_concurrent_runs,omitempty"`
// SourceLinkedDeployment indicates whether source-linked deployment is enabled. Works only in Databricks Workspace
// When set to true, resources created during deployment will point to source files in the workspace instead of their workspace copies.
// File synchronization to ${workspace.file_path} is skipped.
SourceLinkedDeployment *bool `json:"source_linked_deployment,omitempty"`
// Tags to add to all resources.
Tags map[string]string `json:"tags,omitempty"`
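
Below is a minimal, illustrative sketch of enabling this preset in a bundle, mirroring the test setup elsewhere in this commit. The workspace path is a hypothetical example; in YAML this corresponds to a `presets.source_linked_deployment: true` entry in databricks.yml. Per the apply_presets mutator above, the preset only takes effect when the bundle root is under /Workspace/ and the CLI runs on the Databricks Runtime.

package example

import (
	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
)

// exampleSourceLinkedBundle builds a bundle with the source_linked_deployment
// preset explicitly enabled. The sync root path is a hypothetical example; if
// it is not under /Workspace/ or the CLI is not running on the Databricks
// Runtime, the mutator emits a warning and disables the preset.
func exampleSourceLinkedBundle() *bundle.Bundle {
	enabled := true
	return &bundle.Bundle{
		SyncRootPath: "/Workspace/user.name@company.com",
		Config: config.Root{
			Presets: config.Presets{
				SourceLinkedDeployment: &enabled,
			},
		},
	}
}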

View File

@ -41,6 +41,9 @@ type ConfigResource interface {
// InitializeURL initializes the URL field of the resource.
InitializeURL(baseURL url.URL)
// IsNil returns true if the resource is nil, for example, when it was removed from the bundle.
IsNil() bool
}
// ResourceGroup represents a group of resources of the same type.
@ -57,6 +60,9 @@ func collectResourceMap[T ConfigResource](
) ResourceGroup {
resources := make(map[string]ConfigResource)
for key, resource := range input {
if resource.IsNil() {
continue
}
resources[key] = resource
}
return ResourceGroup{

View File

@ -56,3 +56,7 @@ func (s *Cluster) GetName() string {
func (s *Cluster) GetURL() string {
return s.URL
}
func (s *Cluster) IsNil() bool {
return s.ClusterSpec == nil
}

View File

@ -17,7 +17,7 @@ type Dashboard struct {
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
*dashboards.CreateDashboardRequest
*dashboards.Dashboard
// =========================
// === Additional fields ===
@ -79,3 +79,7 @@ func (r *Dashboard) GetName() string {
func (r *Dashboard) GetURL() string {
return r.URL
}
func (r *Dashboard) IsNil() bool {
return r.Dashboard == nil
}

View File

@ -63,3 +63,7 @@ func (j *Job) GetName() string {
func (j *Job) GetURL() string {
return j.URL
}
func (j *Job) IsNil() bool {
return j.JobSettings == nil
}

View File

@ -58,3 +58,7 @@ func (s *MlflowExperiment) GetName() string {
func (s *MlflowExperiment) GetURL() string {
return s.URL
}
func (s *MlflowExperiment) IsNil() bool {
return s.Experiment == nil
}

View File

@ -58,3 +58,7 @@ func (s *MlflowModel) GetName() string {
func (s *MlflowModel) GetURL() string {
return s.URL
}
func (s *MlflowModel) IsNil() bool {
return s.Model == nil
}

View File

@ -66,3 +66,7 @@ func (s *ModelServingEndpoint) GetName() string {
func (s *ModelServingEndpoint) GetURL() string {
return s.URL
}
func (s *ModelServingEndpoint) IsNil() bool {
return s.CreateServingEndpoint == nil
}

View File

@ -58,3 +58,7 @@ func (p *Pipeline) GetName() string {
func (s *Pipeline) GetURL() string {
return s.URL
}
func (s *Pipeline) IsNil() bool {
return s.PipelineSpec == nil
}

View File

@ -13,17 +13,15 @@ import (
)
type QualityMonitor struct {
// Represents the Input Arguments for Terraform and will get
// converted to a HCL representation for CRUD
*catalog.CreateMonitor
// This represents the id which is the full name of the monitor
// (catalog_name.schema_name.table_name) that can be used
// as a reference in other resources. This value is returned by terraform.
ID string `json:"id,omitempty" bundle:"readonly"`
ID string `json:"id,omitempty" bundle:"readonly"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
URL string `json:"url,omitempty" bundle:"internal"`
// The table name is a required field but not included as a JSON field in [catalog.CreateMonitor].
TableName string `json:"table_name"`
// This struct defines the creation payload for a monitor.
*catalog.CreateMonitor
}
func (s *QualityMonitor) UnmarshalJSON(b []byte) error {
@ -64,3 +62,7 @@ func (s *QualityMonitor) GetName() string {
func (s *QualityMonitor) GetURL() string {
return s.URL
}
func (s *QualityMonitor) IsNil() bool {
return s.CreateMonitor == nil
}

View File

@ -68,3 +68,7 @@ func (s *RegisteredModel) GetName() string {
func (s *RegisteredModel) GetURL() string {
return s.URL
}
func (s *RegisteredModel) IsNil() bool {
return s.CreateRegisteredModelRequest == nil
}

View File

@ -56,3 +56,7 @@ func (s *Schema) UnmarshalJSON(b []byte) error {
func (s Schema) MarshalJSON() ([]byte, error) {
return marshal.Marshal(s)
}
func (s *Schema) IsNil() bool {
return s.CreateSchema == nil
}

View File

@ -21,6 +21,12 @@ func (v *filesToSync) Name() string {
}
func (v *filesToSync) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
// The user may be intentional about not synchronizing any files.
// In this case, we should not show any warnings.
if len(rb.Config().Sync.Paths) == 0 {
return nil
}
sync, err := files.GetSync(ctx, rb)
if err != nil {
return diag.FromErr(err)
@ -31,6 +37,7 @@ func (v *filesToSync) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.
return diag.FromErr(err)
}
// If there are files to sync, we don't need to show any warnings.
if len(fl) != 0 {
return nil
}

View File

@ -0,0 +1,105 @@
package validate
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/internal/testutil"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/vfs"
sdkconfig "github.com/databricks/databricks-sdk-go/config"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/iam"
"github.com/databricks/databricks-sdk-go/service/workspace"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestFilesToSync_NoPaths(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Sync: config.Sync{
Paths: []string{},
},
},
}
ctx := context.Background()
rb := bundle.ReadOnly(b)
diags := bundle.ApplyReadOnly(ctx, rb, FilesToSync())
assert.Empty(t, diags)
}
func setupBundleForFilesToSyncTest(t *testing.T) *bundle.Bundle {
dir := t.TempDir()
testutil.Touch(t, dir, "file1")
testutil.Touch(t, dir, "file2")
b := &bundle.Bundle{
BundleRootPath: dir,
BundleRoot: vfs.MustNew(dir),
SyncRootPath: dir,
SyncRoot: vfs.MustNew(dir),
Config: config.Root{
Bundle: config.Bundle{
Target: "default",
},
Workspace: config.Workspace{
FilePath: "/this/doesnt/matter",
CurrentUser: &config.User{
User: &iam.User{},
},
},
Sync: config.Sync{
// Paths are relative to [SyncRootPath].
Paths: []string{"."},
},
},
}
m := mocks.NewMockWorkspaceClient(t)
m.WorkspaceClient.Config = &sdkconfig.Config{
Host: "https://foo.com",
}
// The initialization logic in [sync.New] performs a check on the destination path.
// Removing this check at initialization time is tbd...
m.GetMockWorkspaceAPI().EXPECT().GetStatusByPath(mock.Anything, "/this/doesnt/matter").Return(&workspace.ObjectInfo{
ObjectType: workspace.ObjectTypeDirectory,
}, nil)
b.SetWorkpaceClient(m.WorkspaceClient)
return b
}
func TestFilesToSync_EverythingIgnored(t *testing.T) {
b := setupBundleForFilesToSyncTest(t)
// Ignore all files.
testutil.WriteFile(t, "*\n.*\n", b.BundleRootPath, ".gitignore")
ctx := context.Background()
rb := bundle.ReadOnly(b)
diags := bundle.ApplyReadOnly(ctx, rb, FilesToSync())
require.Equal(t, 1, len(diags))
assert.Equal(t, diag.Warning, diags[0].Severity)
assert.Equal(t, "There are no files to sync, please check your .gitignore", diags[0].Summary)
}
func TestFilesToSync_EverythingExcluded(t *testing.T) {
b := setupBundleForFilesToSyncTest(t)
// Exclude all files.
b.Config.Sync.Exclude = []string{"*"}
ctx := context.Background()
rb := bundle.ReadOnly(b)
diags := bundle.ApplyReadOnly(ctx, rb, FilesToSync())
require.Equal(t, 1, len(diags))
assert.Equal(t, diag.Warning, diags[0].Severity)
assert.Equal(t, "There are no files to sync, please check your .gitignore and sync.exclude configuration", diags[0].Summary)
}

View File

@ -0,0 +1,137 @@
package validate
import (
"context"
"strings"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/cli/libs/dyn/convert"
"github.com/databricks/cli/libs/log"
)
// Validates that any single node clusters defined in the bundle are correctly configured.
func SingleNodeCluster() bundle.ReadOnlyMutator {
return &singleNodeCluster{}
}
type singleNodeCluster struct{}
func (m *singleNodeCluster) Name() string {
return "validate:SingleNodeCluster"
}
const singleNodeWarningDetail = `num_workers should be 0 only for single-node clusters. To create a
valid single node cluster please ensure that the following properties
are correctly set in the cluster specification:
spark_conf:
spark.databricks.cluster.profile: singleNode
spark.master: local[*]
custom_tags:
ResourceClass: SingleNode
`
const singleNodeWarningSummary = `Single node cluster is not correctly configured`
func showSingleNodeClusterWarning(ctx context.Context, v dyn.Value) bool {
// Check if the user has explicitly set the num_workers to 0. Skip the warning
// if that's not the case.
numWorkers, ok := v.Get("num_workers").AsInt()
if !ok || numWorkers > 0 {
return false
}
// Convenient type that contains the common fields from compute.ClusterSpec and
// pipelines.PipelineCluster that we are interested in.
type ClusterConf struct {
SparkConf map[string]string `json:"spark_conf"`
CustomTags map[string]string `json:"custom_tags"`
PolicyId string `json:"policy_id"`
}
conf := &ClusterConf{}
err := convert.ToTyped(conf, v)
if err != nil {
return false
}
// If the policy id is set, we don't want to show the warning. This is because
// the user might have configured `spark_conf` and `custom_tags` correctly
// in their cluster policy.
if conf.PolicyId != "" {
return false
}
profile, ok := conf.SparkConf["spark.databricks.cluster.profile"]
if !ok {
log.Debugf(ctx, "spark_conf spark.databricks.cluster.profile not found in single-node cluster spec")
return true
}
if profile != "singleNode" {
log.Debugf(ctx, "spark_conf spark.databricks.cluster.profile is not singleNode in single-node cluster spec: %s", profile)
return true
}
master, ok := conf.SparkConf["spark.master"]
if !ok {
log.Debugf(ctx, "spark_conf spark.master not found in single-node cluster spec")
return true
}
if !strings.HasPrefix(master, "local") {
log.Debugf(ctx, "spark_conf spark.master does not start with local in single-node cluster spec: %s", master)
return true
}
resourceClass, ok := conf.CustomTags["ResourceClass"]
if !ok {
log.Debugf(ctx, "custom_tag ResourceClass not found in single-node cluster spec")
return true
}
if resourceClass != "SingleNode" {
log.Debugf(ctx, "custom_tag ResourceClass is not SingleNode in single-node cluster spec: %s", resourceClass)
return true
}
return false
}
func (m *singleNodeCluster) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
diags := diag.Diagnostics{}
patterns := []dyn.Pattern{
// Interactive clusters
dyn.NewPattern(dyn.Key("resources"), dyn.Key("clusters"), dyn.AnyKey()),
// Job clusters
dyn.NewPattern(dyn.Key("resources"), dyn.Key("jobs"), dyn.AnyKey(), dyn.Key("job_clusters"), dyn.AnyIndex(), dyn.Key("new_cluster")),
// Job task clusters
dyn.NewPattern(dyn.Key("resources"), dyn.Key("jobs"), dyn.AnyKey(), dyn.Key("tasks"), dyn.AnyIndex(), dyn.Key("new_cluster")),
// Job for each task clusters
dyn.NewPattern(dyn.Key("resources"), dyn.Key("jobs"), dyn.AnyKey(), dyn.Key("tasks"), dyn.AnyIndex(), dyn.Key("for_each_task"), dyn.Key("task"), dyn.Key("new_cluster")),
// Pipeline clusters
dyn.NewPattern(dyn.Key("resources"), dyn.Key("pipelines"), dyn.AnyKey(), dyn.Key("clusters"), dyn.AnyIndex()),
}
for _, p := range patterns {
_, err := dyn.MapByPattern(rb.Config().Value(), p, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
warning := diag.Diagnostic{
Severity: diag.Warning,
Summary: singleNodeWarningSummary,
Detail: singleNodeWarningDetail,
Locations: v.Locations(),
Paths: []dyn.Path{p},
}
if showSingleNodeClusterWarning(ctx, v) {
diags = append(diags, warning)
}
return v, nil
})
if err != nil {
log.Debugf(ctx, "Error while applying single node cluster validation: %s", err)
}
}
return diags
}
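
For contrast with the failure cases exercised in the tests that follow, here is a minimal, illustrative sketch of a cluster spec carrying the spark_conf and custom_tags this validator checks for; with num_workers explicitly set to 0 (as the tests do on the dyn.Value), this shape would not produce the warning.

package example

import "github.com/databricks/databricks-sdk-go/service/compute"

// singleNodeSpec sets the properties listed in singleNodeWarningDetail:
// the singleNode cluster profile, a local[*] spark.master, and the
// SingleNode ResourceClass tag.
var singleNodeSpec = compute.ClusterSpec{
	SparkConf: map[string]string{
		"spark.databricks.cluster.profile": "singleNode",
		"spark.master":                     "local[*]",
	},
	CustomTags: map[string]string{
		"ResourceClass": "SingleNode",
	},
}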

View File

@ -0,0 +1,566 @@
package validate
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/bundle/internal/bundletest"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/pipelines"
"github.com/stretchr/testify/assert"
)
func failCases() []struct {
name string
sparkConf map[string]string
customTags map[string]string
} {
return []struct {
name string
sparkConf map[string]string
customTags map[string]string
}{
{
name: "no tags or conf",
},
{
name: "no tags",
sparkConf: map[string]string{
"spark.databricks.cluster.profile": "singleNode",
"spark.master": "local[*]",
},
},
{
name: "no conf",
customTags: map[string]string{"ResourceClass": "SingleNode"},
},
{
name: "invalid spark cluster profile",
sparkConf: map[string]string{
"spark.databricks.cluster.profile": "invalid",
"spark.master": "local[*]",
},
customTags: map[string]string{"ResourceClass": "SingleNode"},
},
{
name: "invalid spark.master",
sparkConf: map[string]string{
"spark.databricks.cluster.profile": "singleNode",
"spark.master": "invalid",
},
customTags: map[string]string{"ResourceClass": "SingleNode"},
},
{
name: "invalid tags",
sparkConf: map[string]string{
"spark.databricks.cluster.profile": "singleNode",
"spark.master": "local[*]",
},
customTags: map[string]string{"ResourceClass": "invalid"},
},
{
name: "missing ResourceClass tag",
sparkConf: map[string]string{
"spark.databricks.cluster.profile": "singleNode",
"spark.master": "local[*]",
},
customTags: map[string]string{"what": "ever"},
},
{
name: "missing spark.master",
sparkConf: map[string]string{
"spark.databricks.cluster.profile": "singleNode",
},
customTags: map[string]string{"ResourceClass": "SingleNode"},
},
{
name: "missing spark.databricks.cluster.profile",
sparkConf: map[string]string{
"spark.master": "local[*]",
},
customTags: map[string]string{"ResourceClass": "SingleNode"},
},
}
}
func TestValidateSingleNodeClusterFailForInteractiveClusters(t *testing.T) {
ctx := context.Background()
for _, tc := range failCases() {
t.Run(tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Clusters: map[string]*resources.Cluster{
"foo": {
ClusterSpec: &compute.ClusterSpec{
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
},
},
},
},
},
}
bundletest.SetLocation(b, "resources.clusters.foo", []dyn.Location{{File: "a.yml", Line: 1, Column: 1}})
// We can't set num_workers to 0 explicitly in the typed configuration.
// Do it on the dyn.Value directly.
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.clusters.foo.num_workers", dyn.V(0))
})
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Equal(t, diag.Diagnostics{
{
Severity: diag.Warning,
Summary: singleNodeWarningSummary,
Detail: singleNodeWarningDetail,
Locations: []dyn.Location{{File: "a.yml", Line: 1, Column: 1}},
Paths: []dyn.Path{dyn.NewPath(dyn.Key("resources"), dyn.Key("clusters"), dyn.Key("foo"))},
},
}, diags)
})
}
}
func TestValidateSingleNodeClusterFailForJobClusters(t *testing.T) {
ctx := context.Background()
for _, tc := range failCases() {
t.Run(tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {
JobSettings: &jobs.JobSettings{
JobClusters: []jobs.JobCluster{
{
NewCluster: compute.ClusterSpec{
ClusterName: "my_cluster",
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
},
},
},
},
},
},
},
},
}
bundletest.SetLocation(b, "resources.jobs.foo.job_clusters[0].new_cluster", []dyn.Location{{File: "b.yml", Line: 1, Column: 1}})
// We can't set num_workers to 0 explicitly in the typed configuration.
// Do it on the dyn.Value directly.
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.jobs.foo.job_clusters[0].new_cluster.num_workers", dyn.V(0))
})
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Equal(t, diag.Diagnostics{
{
Severity: diag.Warning,
Summary: singleNodeWarningSummary,
Detail: singleNodeWarningDetail,
Locations: []dyn.Location{{File: "b.yml", Line: 1, Column: 1}},
Paths: []dyn.Path{dyn.MustPathFromString("resources.jobs.foo.job_clusters[0].new_cluster")},
},
}, diags)
})
}
}
func TestValidateSingleNodeClusterFailForJobTaskClusters(t *testing.T) {
ctx := context.Background()
for _, tc := range failCases() {
t.Run(tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{
{
NewCluster: &compute.ClusterSpec{
ClusterName: "my_cluster",
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
},
},
},
},
},
},
},
},
}
bundletest.SetLocation(b, "resources.jobs.foo.tasks[0].new_cluster", []dyn.Location{{File: "c.yml", Line: 1, Column: 1}})
// We can't set num_workers to 0 explicitly in the typed configuration.
// Do it on the dyn.Value directly.
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.jobs.foo.tasks[0].new_cluster.num_workers", dyn.V(0))
})
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Equal(t, diag.Diagnostics{
{
Severity: diag.Warning,
Summary: singleNodeWarningSummary,
Detail: singleNodeWarningDetail,
Locations: []dyn.Location{{File: "c.yml", Line: 1, Column: 1}},
Paths: []dyn.Path{dyn.MustPathFromString("resources.jobs.foo.tasks[0].new_cluster")},
},
}, diags)
})
}
}
func TestValidateSingleNodeClusterFailForPipelineClusters(t *testing.T) {
ctx := context.Background()
for _, tc := range failCases() {
t.Run(tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Pipelines: map[string]*resources.Pipeline{
"foo": {
PipelineSpec: &pipelines.PipelineSpec{
Clusters: []pipelines.PipelineCluster{
{
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
},
},
},
},
},
},
},
}
bundletest.SetLocation(b, "resources.pipelines.foo.clusters[0]", []dyn.Location{{File: "d.yml", Line: 1, Column: 1}})
// We can't set num_workers to 0 explicitly in the typed configuration.
// Do it on the dyn.Value directly.
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.pipelines.foo.clusters[0].num_workers", dyn.V(0))
})
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Equal(t, diag.Diagnostics{
{
Severity: diag.Warning,
Summary: singleNodeWarningSummary,
Detail: singleNodeWarningDetail,
Locations: []dyn.Location{{File: "d.yml", Line: 1, Column: 1}},
Paths: []dyn.Path{dyn.MustPathFromString("resources.pipelines.foo.clusters[0]")},
},
}, diags)
})
}
}
func TestValidateSingleNodeClusterFailForJobForEachTaskCluster(t *testing.T) {
ctx := context.Background()
for _, tc := range failCases() {
t.Run(tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{
{
ForEachTask: &jobs.ForEachTask{
Task: jobs.Task{
NewCluster: &compute.ClusterSpec{
ClusterName: "my_cluster",
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
},
},
},
},
},
},
},
},
},
},
}
bundletest.SetLocation(b, "resources.jobs.foo.tasks[0].for_each_task.task.new_cluster", []dyn.Location{{File: "e.yml", Line: 1, Column: 1}})
// We can't set num_workers to 0 explicitly in the typed configuration.
// Do it on the dyn.Value directly.
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.jobs.foo.tasks[0].for_each_task.task.new_cluster.num_workers", dyn.V(0))
})
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Equal(t, diag.Diagnostics{
{
Severity: diag.Warning,
Summary: singleNodeWarningSummary,
Detail: singleNodeWarningDetail,
Locations: []dyn.Location{{File: "e.yml", Line: 1, Column: 1}},
Paths: []dyn.Path{dyn.MustPathFromString("resources.jobs.foo.tasks[0].for_each_task.task.new_cluster")},
},
}, diags)
})
}
}
func passCases() []struct {
name string
numWorkers *int
sparkConf map[string]string
customTags map[string]string
policyId string
} {
zero := 0
one := 1
return []struct {
name string
numWorkers *int
sparkConf map[string]string
customTags map[string]string
policyId string
}{
{
name: "single node cluster",
sparkConf: map[string]string{
"spark.databricks.cluster.profile": "singleNode",
"spark.master": "local[*]",
},
customTags: map[string]string{
"ResourceClass": "SingleNode",
},
numWorkers: &zero,
},
{
name: "num workers is not zero",
numWorkers: &one,
},
{
name: "num workers is not set",
},
{
name: "policy id is not empty",
policyId: "policy-abc",
numWorkers: &zero,
},
}
}
func TestValidateSingleNodeClusterPassInteractiveClusters(t *testing.T) {
ctx := context.Background()
for _, tc := range passCases() {
t.Run(tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Clusters: map[string]*resources.Cluster{
"foo": {
ClusterSpec: &compute.ClusterSpec{
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
PolicyId: tc.policyId,
},
},
},
},
},
}
if tc.numWorkers != nil {
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.clusters.foo.num_workers", dyn.V(*tc.numWorkers))
})
}
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Empty(t, diags)
})
}
}
func TestValidateSingleNodeClusterPassJobClusters(t *testing.T) {
ctx := context.Background()
for _, tc := range passCases() {
t.Run(tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {
JobSettings: &jobs.JobSettings{
JobClusters: []jobs.JobCluster{
{
NewCluster: compute.ClusterSpec{
ClusterName: "my_cluster",
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
PolicyId: tc.policyId,
},
},
},
},
},
},
},
},
}
if tc.numWorkers != nil {
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.jobs.foo.job_clusters[0].new_cluster.num_workers", dyn.V(*tc.numWorkers))
})
}
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Empty(t, diags)
})
}
}
func TestValidateSingleNodeClusterPassJobTaskClusters(t *testing.T) {
ctx := context.Background()
for _, tc := range passCases() {
t.Run(tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{
{
NewCluster: &compute.ClusterSpec{
ClusterName: "my_cluster",
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
PolicyId: tc.policyId,
},
},
},
},
},
},
},
},
}
if tc.numWorkers != nil {
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.jobs.foo.tasks[0].new_cluster.num_workers", dyn.V(*tc.numWorkers))
})
}
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Empty(t, diags)
})
}
}
func TestValidateSingleNodeClusterPassPipelineClusters(t *testing.T) {
ctx := context.Background()
for _, tc := range passCases() {
t.Run(tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Pipelines: map[string]*resources.Pipeline{
"foo": {
PipelineSpec: &pipelines.PipelineSpec{
Clusters: []pipelines.PipelineCluster{
{
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
PolicyId: tc.policyId,
},
},
},
},
},
},
},
}
if tc.numWorkers != nil {
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.pipelines.foo.clusters[0].num_workers", dyn.V(*tc.numWorkers))
})
}
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Empty(t, diags)
})
}
}
func TestValidateSingleNodeClusterPassJobForEachTaskCluster(t *testing.T) {
ctx := context.Background()
for _, tc := range passCases() {
t.Run(tc.name, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{
{
ForEachTask: &jobs.ForEachTask{
Task: jobs.Task{
NewCluster: &compute.ClusterSpec{
ClusterName: "my_cluster",
SparkConf: tc.sparkConf,
CustomTags: tc.customTags,
PolicyId: tc.policyId,
},
},
},
},
},
},
},
},
},
},
}
if tc.numWorkers != nil {
bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) {
return dyn.Set(v, "resources.jobs.foo.tasks[0].for_each_task.task.new_cluster.num_workers", dyn.V(*tc.numWorkers))
})
}
diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster())
assert.Empty(t, diags)
})
}
}
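
For reference, a minimal sketch (not part of this change) of the cluster shape the single-node validator accepts, mirroring the "single node cluster" pass case above; the field names come from the databricks-sdk-go compute package:

package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/compute"
)

func main() {
	// Zero workers is what triggers the check in the first place; the profile,
	// master setting, and ResourceClass tag below are what mark the cluster as
	// a proper single-node cluster, so no warning is emitted.
	spec := compute.ClusterSpec{
		NumWorkers: 0,
		SparkConf: map[string]string{
			"spark.databricks.cluster.profile": "singleNode",
			"spark.master":                     "local[*]",
		},
		CustomTags: map[string]string{
			"ResourceClass": "SingleNode",
		},
	}
	fmt.Printf("%+v\n", spec)
}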

View File

@ -36,6 +36,7 @@ func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
ValidateSyncPatterns(),
JobTaskClusterSpec(),
ValidateFolderPermissions(),
SingleNodeCluster(),
))
}

View File

@ -1,11 +1,8 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
package variable
import (
"context"
"fmt"
"strings"
"github.com/databricks/databricks-sdk-go"
)
@ -25,6 +22,8 @@ type Lookup struct {
Metastore string `json:"metastore,omitempty"`
NotificationDestination string `json:"notification_destination,omitempty"`
Pipeline string `json:"pipeline,omitempty"`
Query string `json:"query,omitempty"`
@ -34,323 +33,78 @@ type Lookup struct {
Warehouse string `json:"warehouse,omitempty"`
}
func LookupFromMap(m map[string]any) *Lookup {
l := &Lookup{}
if v, ok := m["alert"]; ok {
l.Alert = v.(string)
type resolver interface {
// Resolve resolves the underlying entity's ID.
Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error)
// String returns a human-readable representation of the resolver.
String() string
}
func (l *Lookup) constructResolver() (resolver, error) {
var resolvers []resolver
if l.Alert != "" {
resolvers = append(resolvers, resolveAlert{name: l.Alert})
}
if v, ok := m["cluster_policy"]; ok {
l.ClusterPolicy = v.(string)
if l.ClusterPolicy != "" {
resolvers = append(resolvers, resolveClusterPolicy{name: l.ClusterPolicy})
}
if v, ok := m["cluster"]; ok {
l.Cluster = v.(string)
if l.Cluster != "" {
resolvers = append(resolvers, resolveCluster{name: l.Cluster})
}
if v, ok := m["dashboard"]; ok {
l.Dashboard = v.(string)
if l.Dashboard != "" {
resolvers = append(resolvers, resolveDashboard{name: l.Dashboard})
}
if v, ok := m["instance_pool"]; ok {
l.InstancePool = v.(string)
if l.InstancePool != "" {
resolvers = append(resolvers, resolveInstancePool{name: l.InstancePool})
}
if v, ok := m["job"]; ok {
l.Job = v.(string)
if l.Job != "" {
resolvers = append(resolvers, resolveJob{name: l.Job})
}
if v, ok := m["metastore"]; ok {
l.Metastore = v.(string)
if l.Metastore != "" {
resolvers = append(resolvers, resolveMetastore{name: l.Metastore})
}
if v, ok := m["pipeline"]; ok {
l.Pipeline = v.(string)
if l.NotificationDestination != "" {
resolvers = append(resolvers, resolveNotificationDestination{name: l.NotificationDestination})
}
if v, ok := m["query"]; ok {
l.Query = v.(string)
if l.Pipeline != "" {
resolvers = append(resolvers, resolvePipeline{name: l.Pipeline})
}
if v, ok := m["service_principal"]; ok {
l.ServicePrincipal = v.(string)
if l.Query != "" {
resolvers = append(resolvers, resolveQuery{name: l.Query})
}
if v, ok := m["warehouse"]; ok {
l.Warehouse = v.(string)
if l.ServicePrincipal != "" {
resolvers = append(resolvers, resolveServicePrincipal{name: l.ServicePrincipal})
}
if l.Warehouse != "" {
resolvers = append(resolvers, resolveWarehouse{name: l.Warehouse})
}
return l
switch len(resolvers) {
case 0:
return nil, fmt.Errorf("no valid lookup fields provided")
case 1:
return resolvers[0], nil
default:
return nil, fmt.Errorf("exactly one lookup field must be provided")
}
}
func (l *Lookup) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
if err := l.validate(); err != nil {
r, err := l.constructResolver()
if err != nil {
return "", err
}
r := allResolvers()
if l.Alert != "" {
return r.Alert(ctx, w, l.Alert)
}
if l.ClusterPolicy != "" {
return r.ClusterPolicy(ctx, w, l.ClusterPolicy)
}
if l.Cluster != "" {
return r.Cluster(ctx, w, l.Cluster)
}
if l.Dashboard != "" {
return r.Dashboard(ctx, w, l.Dashboard)
}
if l.InstancePool != "" {
return r.InstancePool(ctx, w, l.InstancePool)
}
if l.Job != "" {
return r.Job(ctx, w, l.Job)
}
if l.Metastore != "" {
return r.Metastore(ctx, w, l.Metastore)
}
if l.Pipeline != "" {
return r.Pipeline(ctx, w, l.Pipeline)
}
if l.Query != "" {
return r.Query(ctx, w, l.Query)
}
if l.ServicePrincipal != "" {
return r.ServicePrincipal(ctx, w, l.ServicePrincipal)
}
if l.Warehouse != "" {
return r.Warehouse(ctx, w, l.Warehouse)
}
return "", fmt.Errorf("no valid lookup fields provided")
return r.Resolve(ctx, w)
}
func (l *Lookup) String() string {
if l.Alert != "" {
return fmt.Sprintf("alert: %s", l.Alert)
}
if l.ClusterPolicy != "" {
return fmt.Sprintf("cluster-policy: %s", l.ClusterPolicy)
}
if l.Cluster != "" {
return fmt.Sprintf("cluster: %s", l.Cluster)
}
if l.Dashboard != "" {
return fmt.Sprintf("dashboard: %s", l.Dashboard)
}
if l.InstancePool != "" {
return fmt.Sprintf("instance-pool: %s", l.InstancePool)
}
if l.Job != "" {
return fmt.Sprintf("job: %s", l.Job)
}
if l.Metastore != "" {
return fmt.Sprintf("metastore: %s", l.Metastore)
}
if l.Pipeline != "" {
return fmt.Sprintf("pipeline: %s", l.Pipeline)
}
if l.Query != "" {
return fmt.Sprintf("query: %s", l.Query)
}
if l.ServicePrincipal != "" {
return fmt.Sprintf("service-principal: %s", l.ServicePrincipal)
}
if l.Warehouse != "" {
return fmt.Sprintf("warehouse: %s", l.Warehouse)
r, _ := l.constructResolver()
if r == nil {
return ""
}
return ""
}
func (l *Lookup) validate() error {
// Validate that only one field is set
count := 0
if l.Alert != "" {
count++
}
if l.ClusterPolicy != "" {
count++
}
if l.Cluster != "" {
count++
}
if l.Dashboard != "" {
count++
}
if l.InstancePool != "" {
count++
}
if l.Job != "" {
count++
}
if l.Metastore != "" {
count++
}
if l.Pipeline != "" {
count++
}
if l.Query != "" {
count++
}
if l.ServicePrincipal != "" {
count++
}
if l.Warehouse != "" {
count++
}
if count != 1 {
return fmt.Errorf("exactly one lookup field must be provided")
}
if strings.Contains(l.String(), "${var") {
return fmt.Errorf("lookup fields cannot contain variable references")
}
return nil
}
type resolverFunc func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error)
type resolvers struct {
Alert resolverFunc
ClusterPolicy resolverFunc
Cluster resolverFunc
Dashboard resolverFunc
InstancePool resolverFunc
Job resolverFunc
Metastore resolverFunc
Pipeline resolverFunc
Query resolverFunc
ServicePrincipal resolverFunc
Warehouse resolverFunc
}
func allResolvers() *resolvers {
r := &resolvers{}
r.Alert = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["Alert"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.Alerts.GetByDisplayName(ctx, name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.Id), nil
}
r.ClusterPolicy = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["ClusterPolicy"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.ClusterPolicies.GetByName(ctx, name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.PolicyId), nil
}
r.Cluster = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["Cluster"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.Clusters.GetByClusterName(ctx, name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.ClusterId), nil
}
r.Dashboard = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["Dashboard"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.Dashboards.GetByName(ctx, name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.Id), nil
}
r.InstancePool = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["InstancePool"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.InstancePools.GetByInstancePoolName(ctx, name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.InstancePoolId), nil
}
r.Job = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["Job"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.Jobs.GetBySettingsName(ctx, name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.JobId), nil
}
r.Metastore = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["Metastore"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.Metastores.GetByName(ctx, name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.MetastoreId), nil
}
r.Pipeline = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["Pipeline"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.Pipelines.GetByName(ctx, name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.PipelineId), nil
}
r.Query = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["Query"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.Queries.GetByDisplayName(ctx, name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.Id), nil
}
r.ServicePrincipal = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["ServicePrincipal"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.ServicePrincipals.GetByDisplayName(ctx, name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.ApplicationId), nil
}
r.Warehouse = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["Warehouse"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.Warehouses.GetByName(ctx, name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.Id), nil
}
return r
return r.String()
}
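
As an aside, a minimal sketch of how a new lookup type could plug into the resolver interface defined above. The "widget" entity and its resolver are hypothetical and exist only to illustrate the pattern; a real resolver would call the corresponding SDK GetBy... method, as the files below do:

package variable

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
)

// resolveWidget is a hypothetical resolver used only to illustrate the pattern:
// one small struct per entity, implementing Resolve and String.
type resolveWidget struct {
	name string
}

func (l resolveWidget) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
	// A real resolver would look the entity up via the workspace client here
	// and return its ID; this sketch just fabricates one.
	if l.name == "" {
		return "", fmt.Errorf("widget name must not be empty")
	}
	return "widget-id-for-" + l.name, nil
}

func (l resolveWidget) String() string {
	return fmt.Sprintf("widget: %s", l.name)
}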

View File

@ -0,0 +1,60 @@
package variable
import (
"context"
"reflect"
"testing"
"github.com/stretchr/testify/assert"
)
func TestLookup_Coverage(t *testing.T) {
var lookup Lookup
val := reflect.ValueOf(lookup)
typ := val.Type()
for i := 0; i < val.NumField(); i++ {
field := val.Field(i)
if field.Kind() != reflect.String {
t.Fatalf("Field %s is not a string", typ.Field(i).Name)
}
fieldType := typ.Field(i)
t.Run(fieldType.Name, func(t *testing.T) {
// Use a fresh instance of the struct in each test
var lookup Lookup
// Set the field to a non-empty string
reflect.ValueOf(&lookup).Elem().Field(i).SetString("value")
// Test the [String] function
assert.NotEmpty(t, lookup.String())
})
}
}
func TestLookup_Empty(t *testing.T) {
var lookup Lookup
// Resolve returns an error when no fields are provided
_, err := lookup.Resolve(context.Background(), nil)
assert.ErrorContains(t, err, "no valid lookup fields provided")
// No string representation for an invalid lookup
assert.Empty(t, lookup.String())
}
func TestLookup_Multiple(t *testing.T) {
lookup := Lookup{
Alert: "alert",
Query: "query",
}
// Resolve returns an error when multiple fields are provided
_, err := lookup.Resolve(context.Background(), nil)
assert.ErrorContains(t, err, "exactly one lookup field must be provided")
// No string representation for an invalid lookup
assert.Empty(t, lookup.String())
}

View File

@ -0,0 +1,24 @@
package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
type resolveAlert struct {
name string
}
func (l resolveAlert) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
entity, err := w.Alerts.GetByDisplayName(ctx, l.name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.Id), nil
}
func (l resolveAlert) String() string {
return fmt.Sprintf("alert: %s", l.name)
}

View File

@ -0,0 +1,49 @@
package variable
import (
"context"
"testing"
"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/sql"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestResolveAlert_ResolveSuccess(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockAlertsAPI()
api.EXPECT().
GetByDisplayName(mock.Anything, "alert").
Return(&sql.ListAlertsResponseAlert{
Id: "1234",
}, nil)
ctx := context.Background()
l := resolveAlert{name: "alert"}
result, err := l.Resolve(ctx, m.WorkspaceClient)
require.NoError(t, err)
assert.Equal(t, "1234", result)
}
func TestResolveAlert_ResolveNotFound(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockAlertsAPI()
api.EXPECT().
GetByDisplayName(mock.Anything, "alert").
Return(nil, &apierr.APIError{StatusCode: 404})
ctx := context.Background()
l := resolveAlert{name: "alert"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
require.ErrorIs(t, err, apierr.ErrNotFound)
}
func TestResolveAlert_String(t *testing.T) {
l := resolveAlert{name: "name"}
assert.Equal(t, "alert: name", l.String())
}

View File

@ -8,13 +8,13 @@ import (
"github.com/databricks/databricks-sdk-go/service/compute"
)
var lookupOverrides = map[string]resolverFunc{
"Cluster": resolveCluster,
type resolveCluster struct {
name string
}
// We added a custom resolver for clusters to filter by cluster source when listing all clusters.
// Without this filtering, listing could take a very long time (5-10 minutes), which leads to lookup timeouts.
func resolveCluster(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
func (l resolveCluster) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
result, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{
FilterBy: &compute.ListClustersFilterBy{
ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
@ -30,6 +30,8 @@ func resolveCluster(ctx context.Context, w *databricks.WorkspaceClient, name str
key := v.ClusterName
tmp[key] = append(tmp[key], v)
}
name := l.name
alternatives, ok := tmp[name]
if !ok || len(alternatives) == 0 {
return "", fmt.Errorf("cluster named '%s' does not exist", name)
@ -39,3 +41,7 @@ func resolveCluster(ctx context.Context, w *databricks.WorkspaceClient, name str
}
return alternatives[0].ClusterId, nil
}
func (l resolveCluster) String() string {
return fmt.Sprintf("cluster: %s", l.name)
}
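
The hunks above elide the branch that handles duplicate cluster names. A rough, self-contained sketch of the grouping-and-disambiguation pattern this resolver relies on, under the assumption that zero or multiple matches are errors (the exact wording in the real code may differ):

package main

import "fmt"

// pickUniqueID groups listed clusters by name, fails on zero or multiple
// matches, and returns the single matching ID otherwise.
func pickUniqueID(byName map[string][]string, name string) (string, error) {
	alternatives, ok := byName[name]
	if !ok || len(alternatives) == 0 {
		return "", fmt.Errorf("cluster named '%s' does not exist", name)
	}
	if len(alternatives) > 1 {
		return "", fmt.Errorf("there are %d clusters named '%s'", len(alternatives), name)
	}
	return alternatives[0], nil
}

func main() {
	byName := map[string][]string{
		"cluster1": {"1234"},
		"cluster2": {"2345", "6789"},
	}
	fmt.Println(pickUniqueID(byName, "cluster1")) // 1234 <nil>
	fmt.Println(pickUniqueID(byName, "cluster2")) // duplicate-name error
}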

View File

@ -0,0 +1,24 @@
package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
type resolveClusterPolicy struct {
name string
}
func (l resolveClusterPolicy) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
entity, err := w.ClusterPolicies.GetByName(ctx, l.name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.PolicyId), nil
}
func (l resolveClusterPolicy) String() string {
return fmt.Sprintf("cluster-policy: %s", l.name)
}

View File

@ -0,0 +1,49 @@
package variable
import (
"context"
"testing"
"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestResolveClusterPolicy_ResolveSuccess(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockClusterPoliciesAPI()
api.EXPECT().
GetByName(mock.Anything, "policy").
Return(&compute.Policy{
PolicyId: "1234",
}, nil)
ctx := context.Background()
l := resolveClusterPolicy{name: "policy"}
result, err := l.Resolve(ctx, m.WorkspaceClient)
require.NoError(t, err)
assert.Equal(t, "1234", result)
}
func TestResolveClusterPolicy_ResolveNotFound(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockClusterPoliciesAPI()
api.EXPECT().
GetByName(mock.Anything, "policy").
Return(nil, &apierr.APIError{StatusCode: 404})
ctx := context.Background()
l := resolveClusterPolicy{name: "policy"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
require.ErrorIs(t, err, apierr.ErrNotFound)
}
func TestResolveClusterPolicy_String(t *testing.T) {
l := resolveClusterPolicy{name: "name"}
assert.Equal(t, "cluster-policy: name", l.String())
}

View File

@ -0,0 +1,50 @@
package variable
import (
"context"
"testing"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestResolveCluster_ResolveSuccess(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockClustersAPI()
api.EXPECT().
ListAll(mock.Anything, mock.Anything).
Return([]compute.ClusterDetails{
{ClusterId: "1234", ClusterName: "cluster1"},
{ClusterId: "2345", ClusterName: "cluster2"},
}, nil)
ctx := context.Background()
l := resolveCluster{name: "cluster2"}
result, err := l.Resolve(ctx, m.WorkspaceClient)
require.NoError(t, err)
assert.Equal(t, "2345", result)
}
func TestResolveCluster_ResolveNotFound(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockClustersAPI()
api.EXPECT().
ListAll(mock.Anything, mock.Anything).
Return([]compute.ClusterDetails{}, nil)
ctx := context.Background()
l := resolveCluster{name: "cluster"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
require.Error(t, err)
assert.Contains(t, err.Error(), "cluster named 'cluster' does not exist")
}
func TestResolveCluster_String(t *testing.T) {
l := resolveCluster{name: "name"}
assert.Equal(t, "cluster: name", l.String())
}

View File

@ -0,0 +1,24 @@
package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
type resolveDashboard struct {
name string
}
func (l resolveDashboard) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
entity, err := w.Dashboards.GetByName(ctx, l.name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.Id), nil
}
func (l resolveDashboard) String() string {
return fmt.Sprintf("dashboard: %s", l.name)
}

View File

@ -0,0 +1,49 @@
package variable
import (
"context"
"testing"
"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/sql"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestResolveDashboard_ResolveSuccess(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockDashboardsAPI()
api.EXPECT().
GetByName(mock.Anything, "dashboard").
Return(&sql.Dashboard{
Id: "1234",
}, nil)
ctx := context.Background()
l := resolveDashboard{name: "dashboard"}
result, err := l.Resolve(ctx, m.WorkspaceClient)
require.NoError(t, err)
assert.Equal(t, "1234", result)
}
func TestResolveDashboard_ResolveNotFound(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockDashboardsAPI()
api.EXPECT().
GetByName(mock.Anything, "dashboard").
Return(nil, &apierr.APIError{StatusCode: 404})
ctx := context.Background()
l := resolveDashboard{name: "dashboard"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
require.ErrorIs(t, err, apierr.ErrNotFound)
}
func TestResolveDashboard_String(t *testing.T) {
l := resolveDashboard{name: "name"}
assert.Equal(t, "dashboard: name", l.String())
}

View File

@ -0,0 +1,24 @@
package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
type resolveInstancePool struct {
name string
}
func (l resolveInstancePool) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
entity, err := w.InstancePools.GetByInstancePoolName(ctx, l.name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.InstancePoolId), nil
}
func (l resolveInstancePool) String() string {
return fmt.Sprintf("instance-pool: %s", l.name)
}

View File

@ -0,0 +1,49 @@
package variable
import (
"context"
"testing"
"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestResolveInstancePool_ResolveSuccess(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockInstancePoolsAPI()
api.EXPECT().
GetByInstancePoolName(mock.Anything, "instance_pool").
Return(&compute.InstancePoolAndStats{
InstancePoolId: "5678",
}, nil)
ctx := context.Background()
l := resolveInstancePool{name: "instance_pool"}
result, err := l.Resolve(ctx, m.WorkspaceClient)
require.NoError(t, err)
assert.Equal(t, "5678", result)
}
func TestResolveInstancePool_ResolveNotFound(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockInstancePoolsAPI()
api.EXPECT().
GetByInstancePoolName(mock.Anything, "instance_pool").
Return(nil, &apierr.APIError{StatusCode: 404})
ctx := context.Background()
l := resolveInstancePool{name: "instance_pool"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
require.ErrorIs(t, err, apierr.ErrNotFound)
}
func TestResolveInstancePool_String(t *testing.T) {
l := resolveInstancePool{name: "name"}
assert.Equal(t, "instance-pool: name", l.String())
}

View File

@ -0,0 +1,24 @@
package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
type resolveJob struct {
name string
}
func (l resolveJob) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
entity, err := w.Jobs.GetBySettingsName(ctx, l.name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.JobId), nil
}
func (l resolveJob) String() string {
return fmt.Sprintf("job: %s", l.name)
}

View File

@ -0,0 +1,49 @@
package variable
import (
"context"
"testing"
"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestResolveJob_ResolveSuccess(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockJobsAPI()
api.EXPECT().
GetBySettingsName(mock.Anything, "job").
Return(&jobs.BaseJob{
JobId: 5678,
}, nil)
ctx := context.Background()
l := resolveJob{name: "job"}
result, err := l.Resolve(ctx, m.WorkspaceClient)
require.NoError(t, err)
assert.Equal(t, "5678", result)
}
func TestResolveJob_ResolveNotFound(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockJobsAPI()
api.EXPECT().
GetBySettingsName(mock.Anything, "job").
Return(nil, &apierr.APIError{StatusCode: 404})
ctx := context.Background()
l := resolveJob{name: "job"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
require.ErrorIs(t, err, apierr.ErrNotFound)
}
func TestResolveJob_String(t *testing.T) {
l := resolveJob{name: "name"}
assert.Equal(t, "job: name", l.String())
}

View File

@ -0,0 +1,24 @@
package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
type resolveMetastore struct {
name string
}
func (l resolveMetastore) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
entity, err := w.Metastores.GetByName(ctx, l.name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.MetastoreId), nil
}
func (l resolveMetastore) String() string {
return fmt.Sprintf("metastore: %s", l.name)
}

View File

@ -0,0 +1,49 @@
package variable
import (
"context"
"testing"
"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestResolveMetastore_ResolveSuccess(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockMetastoresAPI()
api.EXPECT().
GetByName(mock.Anything, "metastore").
Return(&catalog.MetastoreInfo{
MetastoreId: "abcd",
}, nil)
ctx := context.Background()
l := resolveMetastore{name: "metastore"}
result, err := l.Resolve(ctx, m.WorkspaceClient)
require.NoError(t, err)
assert.Equal(t, "abcd", result)
}
func TestResolveMetastore_ResolveNotFound(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockMetastoresAPI()
api.EXPECT().
GetByName(mock.Anything, "metastore").
Return(nil, &apierr.APIError{StatusCode: 404})
ctx := context.Background()
l := resolveMetastore{name: "metastore"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
require.ErrorIs(t, err, apierr.ErrNotFound)
}
func TestResolveMetastore_String(t *testing.T) {
l := resolveMetastore{name: "name"}
assert.Equal(t, "metastore: name", l.String())
}

View File

@ -0,0 +1,46 @@
package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/service/settings"
)
type resolveNotificationDestination struct {
name string
}
func (l resolveNotificationDestination) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
result, err := w.NotificationDestinations.ListAll(ctx, settings.ListNotificationDestinationsRequest{
// The default page size for this API is 20.
// We use a higher value to make fewer API calls.
PageSize: 200,
})
if err != nil {
return "", err
}
// Collect all notification destinations with the given name.
var entities []settings.ListNotificationDestinationsResult
for _, entity := range result {
if entity.DisplayName == l.name {
entities = append(entities, entity)
}
}
// Return the ID only if exactly one notification destination matches.
switch len(entities) {
case 0:
return "", fmt.Errorf("notification destination named %q does not exist", l.name)
case 1:
return entities[0].Id, nil
default:
return "", fmt.Errorf("there are %d instances of clusters named %q", len(entities), l.name)
}
}
func (l resolveNotificationDestination) String() string {
return fmt.Sprintf("notification-destination: %s", l.name)
}

View File

@ -0,0 +1,82 @@
package variable
import (
"context"
"fmt"
"testing"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/settings"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestResolveNotificationDestination_ResolveSuccess(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockNotificationDestinationsAPI()
api.EXPECT().
ListAll(mock.Anything, mock.Anything).
Return([]settings.ListNotificationDestinationsResult{
{Id: "1234", DisplayName: "destination"},
}, nil)
ctx := context.Background()
l := resolveNotificationDestination{name: "destination"}
result, err := l.Resolve(ctx, m.WorkspaceClient)
require.NoError(t, err)
assert.Equal(t, "1234", result)
}
func TestResolveNotificationDestination_ResolveError(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockNotificationDestinationsAPI()
api.EXPECT().
ListAll(mock.Anything, mock.Anything).
Return(nil, fmt.Errorf("bad"))
ctx := context.Background()
l := resolveNotificationDestination{name: "destination"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
assert.ErrorContains(t, err, "bad")
}
func TestResolveNotificationDestination_ResolveNotFound(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockNotificationDestinationsAPI()
api.EXPECT().
ListAll(mock.Anything, mock.Anything).
Return([]settings.ListNotificationDestinationsResult{}, nil)
ctx := context.Background()
l := resolveNotificationDestination{name: "destination"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
require.Error(t, err)
assert.ErrorContains(t, err, `notification destination named "destination" does not exist`)
}
func TestResolveNotificationDestination_ResolveMultiple(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockNotificationDestinationsAPI()
api.EXPECT().
ListAll(mock.Anything, mock.Anything).
Return([]settings.ListNotificationDestinationsResult{
{Id: "1234", DisplayName: "destination"},
{Id: "5678", DisplayName: "destination"},
}, nil)
ctx := context.Background()
l := resolveNotificationDestination{name: "destination"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
require.Error(t, err)
assert.ErrorContains(t, err, `there are 2 instances of notification destinations named "destination"`)
}
func TestResolveNotificationDestination_String(t *testing.T) {
l := resolveNotificationDestination{name: "name"}
assert.Equal(t, "notification-destination: name", l.String())
}

View File

@ -0,0 +1,24 @@
package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
type resolvePipeline struct {
name string
}
func (l resolvePipeline) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
entity, err := w.Pipelines.GetByName(ctx, l.name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.PipelineId), nil
}
func (l resolvePipeline) String() string {
return fmt.Sprintf("pipeline: %s", l.name)
}

View File

@ -0,0 +1,49 @@
package variable
import (
"context"
"testing"
"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/pipelines"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestResolvePipeline_ResolveSuccess(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockPipelinesAPI()
api.EXPECT().
GetByName(mock.Anything, "pipeline").
Return(&pipelines.PipelineStateInfo{
PipelineId: "abcd",
}, nil)
ctx := context.Background()
l := resolvePipeline{name: "pipeline"}
result, err := l.Resolve(ctx, m.WorkspaceClient)
require.NoError(t, err)
assert.Equal(t, "abcd", result)
}
func TestResolvePipeline_ResolveNotFound(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockPipelinesAPI()
api.EXPECT().
GetByName(mock.Anything, "pipeline").
Return(nil, &apierr.APIError{StatusCode: 404})
ctx := context.Background()
l := resolvePipeline{name: "pipeline"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
require.ErrorIs(t, err, apierr.ErrNotFound)
}
func TestResolvePipeline_String(t *testing.T) {
l := resolvePipeline{name: "name"}
assert.Equal(t, "pipeline: name", l.String())
}

View File

@ -0,0 +1,24 @@
package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
type resolveQuery struct {
name string
}
func (l resolveQuery) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
entity, err := w.Queries.GetByDisplayName(ctx, l.name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.Id), nil
}
func (l resolveQuery) String() string {
return fmt.Sprintf("query: %s", l.name)
}

View File

@ -0,0 +1,49 @@
package variable
import (
"context"
"testing"
"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/sql"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestResolveQuery_ResolveSuccess(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockQueriesAPI()
api.EXPECT().
GetByDisplayName(mock.Anything, "query").
Return(&sql.ListQueryObjectsResponseQuery{
Id: "1234",
}, nil)
ctx := context.Background()
l := resolveQuery{name: "query"}
result, err := l.Resolve(ctx, m.WorkspaceClient)
require.NoError(t, err)
assert.Equal(t, "1234", result)
}
func TestResolveQuery_ResolveNotFound(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockQueriesAPI()
api.EXPECT().
GetByDisplayName(mock.Anything, "query").
Return(nil, &apierr.APIError{StatusCode: 404})
ctx := context.Background()
l := resolveQuery{name: "query"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
require.ErrorIs(t, err, apierr.ErrNotFound)
}
func TestResolveQuery_String(t *testing.T) {
l := resolveQuery{name: "name"}
assert.Equal(t, "query: name", l.String())
}

View File

@ -0,0 +1,24 @@
package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
type resolveServicePrincipal struct {
name string
}
func (l resolveServicePrincipal) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
entity, err := w.ServicePrincipals.GetByDisplayName(ctx, l.name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.ApplicationId), nil
}
func (l resolveServicePrincipal) String() string {
return fmt.Sprintf("service-principal: %s", l.name)
}

View File

@ -0,0 +1,49 @@
package variable
import (
"context"
"testing"
"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/iam"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestResolveServicePrincipal_ResolveSuccess(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockServicePrincipalsAPI()
api.EXPECT().
GetByDisplayName(mock.Anything, "service-principal").
Return(&iam.ServicePrincipal{
ApplicationId: "5678",
}, nil)
ctx := context.Background()
l := resolveServicePrincipal{name: "service-principal"}
result, err := l.Resolve(ctx, m.WorkspaceClient)
require.NoError(t, err)
assert.Equal(t, "5678", result)
}
func TestResolveServicePrincipal_ResolveNotFound(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockServicePrincipalsAPI()
api.EXPECT().
GetByDisplayName(mock.Anything, "service-principal").
Return(nil, &apierr.APIError{StatusCode: 404})
ctx := context.Background()
l := resolveServicePrincipal{name: "service-principal"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
require.ErrorIs(t, err, apierr.ErrNotFound)
}
func TestResolveServicePrincipal_String(t *testing.T) {
l := resolveServicePrincipal{name: "name"}
assert.Equal(t, "service-principal: name", l.String())
}

View File

@ -0,0 +1,24 @@
package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
)
type resolveWarehouse struct {
name string
}
func (l resolveWarehouse) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
entity, err := w.Warehouses.GetByName(ctx, l.name)
if err != nil {
return "", err
}
return fmt.Sprint(entity.Id), nil
}
func (l resolveWarehouse) String() string {
return fmt.Sprintf("warehouse: %s", l.name)
}

View File

@ -0,0 +1,49 @@
package variable
import (
"context"
"testing"
"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/sql"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestResolveWarehouse_ResolveSuccess(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockWarehousesAPI()
api.EXPECT().
GetByName(mock.Anything, "warehouse").
Return(&sql.EndpointInfo{
Id: "abcd",
}, nil)
ctx := context.Background()
l := resolveWarehouse{name: "warehouse"}
result, err := l.Resolve(ctx, m.WorkspaceClient)
require.NoError(t, err)
assert.Equal(t, "abcd", result)
}
func TestResolveWarehouse_ResolveNotFound(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
api := m.GetMockWarehousesAPI()
api.EXPECT().
GetByName(mock.Anything, "warehouse").
Return(nil, &apierr.APIError{StatusCode: 404})
ctx := context.Background()
l := resolveWarehouse{name: "warehouse"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
require.ErrorIs(t, err, apierr.ErrNotFound)
}
func TestResolveWarehouse_String(t *testing.T) {
l := resolveWarehouse{name: "name"}
assert.Equal(t, "warehouse: name", l.String())
}

View File

@ -7,6 +7,7 @@ import (
"io/fs"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/permissions"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/diag"
@ -23,6 +24,11 @@ func (m *upload) Name() string {
}
func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
if config.IsExplicitlyEnabled(b.Config.Presets.SourceLinkedDeployment) {
cmdio.LogString(ctx, "Source-linked deployment is enabled. Deployed resources reference the source files in your working tree instead of separate copies.")
return nil
}
cmdio.LogString(ctx, fmt.Sprintf("Uploading bundle files to %s...", b.Config.Workspace.FilePath))
opts, err := GetSyncOptions(ctx, bundle.ReadOnly(b))
if err != nil {

View File

@ -29,7 +29,7 @@ func mockDashboardBundle(t *testing.T) *bundle.Bundle {
Resources: config.Resources{
Dashboards: map[string]*resources.Dashboard{
"dash1": {
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
DisplayName: "My Special Dashboard",
},
},

View File

@ -792,7 +792,7 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
},
Dashboards: map[string]*resources.Dashboard{
"test_dashboard": {
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
DisplayName: "test_dashboard",
},
},
@ -951,12 +951,12 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
},
Dashboards: map[string]*resources.Dashboard{
"test_dashboard": {
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
DisplayName: "test_dashboard",
},
},
"test_dashboard_new": {
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
DisplayName: "test_dashboard_new",
},
},

View File

@ -15,7 +15,7 @@ import (
func TestConvertDashboard(t *testing.T) {
var src = resources.Dashboard{
CreateDashboardRequest: &dashboards.CreateDashboardRequest{
Dashboard: &dashboards.Dashboard{
DisplayName: "my dashboard",
WarehouseId: "f00dcafe",
ParentPath: "/some/path",

View File

@ -15,8 +15,8 @@ import (
func TestConvertQualityMonitor(t *testing.T) {
var src = resources.QualityMonitor{
TableName: "test_table_name",
CreateMonitor: &catalog.CreateMonitor{
TableName: "test_table_name",
AssetsDir: "assets_dir",
OutputSchemaName: "output_schema_name",
InferenceLog: &catalog.MonitorInferenceLog{

View File

@ -4,6 +4,7 @@ bundle:
resources:
quality_monitors:
myqualitymonitor:
table_name: catalog.schema.quality_monitor
inference_log:
granularities:
- a

View File

@ -1,3 +1,3 @@
package schema
const ProviderVersion = "1.54.0"
const ProviderVersion = "1.58.0"

View File

@ -0,0 +1,98 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type DataSourceFunctionsFunctionsInputParamsParameters struct {
Comment string `json:"comment,omitempty"`
Name string `json:"name"`
ParameterDefault string `json:"parameter_default,omitempty"`
ParameterMode string `json:"parameter_mode,omitempty"`
ParameterType string `json:"parameter_type,omitempty"`
Position int `json:"position"`
TypeIntervalType string `json:"type_interval_type,omitempty"`
TypeJson string `json:"type_json,omitempty"`
TypeName string `json:"type_name"`
TypePrecision int `json:"type_precision,omitempty"`
TypeScale int `json:"type_scale,omitempty"`
TypeText string `json:"type_text"`
}
type DataSourceFunctionsFunctionsInputParams struct {
Parameters []DataSourceFunctionsFunctionsInputParamsParameters `json:"parameters,omitempty"`
}
type DataSourceFunctionsFunctionsReturnParamsParameters struct {
Comment string `json:"comment,omitempty"`
Name string `json:"name"`
ParameterDefault string `json:"parameter_default,omitempty"`
ParameterMode string `json:"parameter_mode,omitempty"`
ParameterType string `json:"parameter_type,omitempty"`
Position int `json:"position"`
TypeIntervalType string `json:"type_interval_type,omitempty"`
TypeJson string `json:"type_json,omitempty"`
TypeName string `json:"type_name"`
TypePrecision int `json:"type_precision,omitempty"`
TypeScale int `json:"type_scale,omitempty"`
TypeText string `json:"type_text"`
}
type DataSourceFunctionsFunctionsReturnParams struct {
Parameters []DataSourceFunctionsFunctionsReturnParamsParameters `json:"parameters,omitempty"`
}
type DataSourceFunctionsFunctionsRoutineDependenciesDependenciesFunction struct {
FunctionFullName string `json:"function_full_name"`
}
type DataSourceFunctionsFunctionsRoutineDependenciesDependenciesTable struct {
TableFullName string `json:"table_full_name"`
}
type DataSourceFunctionsFunctionsRoutineDependenciesDependencies struct {
Function []DataSourceFunctionsFunctionsRoutineDependenciesDependenciesFunction `json:"function,omitempty"`
Table []DataSourceFunctionsFunctionsRoutineDependenciesDependenciesTable `json:"table,omitempty"`
}
type DataSourceFunctionsFunctionsRoutineDependencies struct {
Dependencies []DataSourceFunctionsFunctionsRoutineDependenciesDependencies `json:"dependencies,omitempty"`
}
type DataSourceFunctionsFunctions struct {
BrowseOnly bool `json:"browse_only,omitempty"`
CatalogName string `json:"catalog_name,omitempty"`
Comment string `json:"comment,omitempty"`
CreatedAt int `json:"created_at,omitempty"`
CreatedBy string `json:"created_by,omitempty"`
DataType string `json:"data_type,omitempty"`
ExternalLanguage string `json:"external_language,omitempty"`
ExternalName string `json:"external_name,omitempty"`
FullDataType string `json:"full_data_type,omitempty"`
FullName string `json:"full_name,omitempty"`
FunctionId string `json:"function_id,omitempty"`
IsDeterministic bool `json:"is_deterministic,omitempty"`
IsNullCall bool `json:"is_null_call,omitempty"`
MetastoreId string `json:"metastore_id,omitempty"`
Name string `json:"name,omitempty"`
Owner string `json:"owner,omitempty"`
ParameterStyle string `json:"parameter_style,omitempty"`
Properties string `json:"properties,omitempty"`
RoutineBody string `json:"routine_body,omitempty"`
RoutineDefinition string `json:"routine_definition,omitempty"`
SchemaName string `json:"schema_name,omitempty"`
SecurityType string `json:"security_type,omitempty"`
SpecificName string `json:"specific_name,omitempty"`
SqlDataAccess string `json:"sql_data_access,omitempty"`
SqlPath string `json:"sql_path,omitempty"`
UpdatedAt int `json:"updated_at,omitempty"`
UpdatedBy string `json:"updated_by,omitempty"`
InputParams []DataSourceFunctionsFunctionsInputParams `json:"input_params,omitempty"`
ReturnParams []DataSourceFunctionsFunctionsReturnParams `json:"return_params,omitempty"`
RoutineDependencies []DataSourceFunctionsFunctionsRoutineDependencies `json:"routine_dependencies,omitempty"`
}
type DataSourceFunctions struct {
CatalogName string `json:"catalog_name"`
IncludeBrowse bool `json:"include_browse,omitempty"`
SchemaName string `json:"schema_name"`
Functions []DataSourceFunctionsFunctions `json:"functions,omitempty"`
}

View File

@ -35,6 +35,7 @@ type DataSourceStorageCredentialStorageCredentialInfo struct {
Comment string `json:"comment,omitempty"`
CreatedAt int `json:"created_at,omitempty"`
CreatedBy string `json:"created_by,omitempty"`
FullName string `json:"full_name,omitempty"`
Id string `json:"id,omitempty"`
IsolationMode string `json:"isolation_mode,omitempty"`
MetastoreId string `json:"metastore_id,omitempty"`

View File

@ -4,7 +4,6 @@ package schema
type DataSourceVolumes struct {
CatalogName string `json:"catalog_name"`
Id string `json:"id,omitempty"`
Ids []string `json:"ids,omitempty"`
SchemaName string `json:"schema_name"`
}

View File

@ -21,6 +21,7 @@ type DataSources struct {
Directory map[string]any `json:"databricks_directory,omitempty"`
ExternalLocation map[string]any `json:"databricks_external_location,omitempty"`
ExternalLocations map[string]any `json:"databricks_external_locations,omitempty"`
Functions map[string]any `json:"databricks_functions,omitempty"`
Group map[string]any `json:"databricks_group,omitempty"`
InstancePool map[string]any `json:"databricks_instance_pool,omitempty"`
InstanceProfiles map[string]any `json:"databricks_instance_profiles,omitempty"`
@ -79,6 +80,7 @@ func NewDataSources() *DataSources {
Directory: make(map[string]any),
ExternalLocation: make(map[string]any),
ExternalLocations: make(map[string]any),
Functions: make(map[string]any),
Group: make(map[string]any),
InstancePool: make(map[string]any),
InstanceProfiles: make(map[string]any),

View File

@ -0,0 +1,46 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type ResourceAlertConditionOperandColumn struct {
Name string `json:"name"`
}
type ResourceAlertConditionOperand struct {
Column *ResourceAlertConditionOperandColumn `json:"column,omitempty"`
}
type ResourceAlertConditionThresholdValue struct {
BoolValue bool `json:"bool_value,omitempty"`
DoubleValue int `json:"double_value,omitempty"`
StringValue string `json:"string_value,omitempty"`
}
type ResourceAlertConditionThreshold struct {
Value *ResourceAlertConditionThresholdValue `json:"value,omitempty"`
}
type ResourceAlertCondition struct {
EmptyResultState string `json:"empty_result_state,omitempty"`
Op string `json:"op"`
Operand *ResourceAlertConditionOperand `json:"operand,omitempty"`
Threshold *ResourceAlertConditionThreshold `json:"threshold,omitempty"`
}
type ResourceAlert struct {
CreateTime string `json:"create_time,omitempty"`
CustomBody string `json:"custom_body,omitempty"`
CustomSubject string `json:"custom_subject,omitempty"`
DisplayName string `json:"display_name"`
Id string `json:"id,omitempty"`
LifecycleState string `json:"lifecycle_state,omitempty"`
NotifyOnOk bool `json:"notify_on_ok,omitempty"`
OwnerUserName string `json:"owner_user_name,omitempty"`
ParentPath string `json:"parent_path,omitempty"`
QueryId string `json:"query_id"`
SecondsToRetrigger int `json:"seconds_to_retrigger,omitempty"`
State string `json:"state,omitempty"`
TriggerTime string `json:"trigger_time,omitempty"`
UpdateTime string `json:"update_time,omitempty"`
Condition *ResourceAlertCondition `json:"condition,omitempty"`
}

View File

@ -0,0 +1,23 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type ResourceCustomAppIntegrationTokenAccessPolicy struct {
AccessTokenTtlInMinutes int `json:"access_token_ttl_in_minutes,omitempty"`
RefreshTokenTtlInMinutes int `json:"refresh_token_ttl_in_minutes,omitempty"`
}
type ResourceCustomAppIntegration struct {
ClientId string `json:"client_id,omitempty"`
ClientSecret string `json:"client_secret,omitempty"`
Confidential bool `json:"confidential,omitempty"`
CreateTime string `json:"create_time,omitempty"`
CreatedBy int `json:"created_by,omitempty"`
CreatorUsername string `json:"creator_username,omitempty"`
Id string `json:"id,omitempty"`
IntegrationId string `json:"integration_id,omitempty"`
Name string `json:"name,omitempty"`
RedirectUrls []string `json:"redirect_urls,omitempty"`
Scopes []string `json:"scopes,omitempty"`
TokenAccessPolicy *ResourceCustomAppIntegrationTokenAccessPolicy `json:"token_access_policy,omitempty"`
}

View File

@ -19,13 +19,13 @@ type ResourceLibraryPypi struct {
}
type ResourceLibrary struct {
ClusterId string `json:"cluster_id"`
Egg string `json:"egg,omitempty"`
Id string `json:"id,omitempty"`
Jar string `json:"jar,omitempty"`
Requirements string `json:"requirements,omitempty"`
Whl string `json:"whl,omitempty"`
Cran *ResourceLibraryCran `json:"cran,omitempty"`
Maven *ResourceLibraryMaven `json:"maven,omitempty"`
Pypi *ResourceLibraryPypi `json:"pypi,omitempty"`
ClusterId string `json:"cluster_id"`
Egg string `json:"egg,omitempty"`
Id string `json:"id,omitempty"`
Jar string `json:"jar,omitempty"`
Requirements string `json:"requirements,omitempty"`
Whl string `json:"whl,omitempty"`
Cran []ResourceLibraryCran `json:"cran,omitempty"`
Maven []ResourceLibraryMaven `json:"maven,omitempty"`
Pypi []ResourceLibraryPypi `json:"pypi,omitempty"`
}

View File

@ -137,6 +137,7 @@ type ResourcePipelineFilters struct {
type ResourcePipelineGatewayDefinition struct {
ConnectionId string `json:"connection_id,omitempty"`
ConnectionName string `json:"connection_name,omitempty"`
GatewayStorageCatalog string `json:"gateway_storage_catalog,omitempty"`
GatewayStorageName string `json:"gateway_storage_name,omitempty"`
GatewayStorageSchema string `json:"gateway_storage_schema,omitempty"`
@ -242,6 +243,12 @@ type ResourcePipelineNotification struct {
EmailRecipients []string `json:"email_recipients,omitempty"`
}
type ResourcePipelineRestartWindow struct {
DaysOfWeek string `json:"days_of_week,omitempty"`
StartHour int `json:"start_hour"`
TimeZoneId string `json:"time_zone_id,omitempty"`
}
type ResourcePipelineTriggerCron struct {
QuartzCronSchedule string `json:"quartz_cron_schedule,omitempty"`
TimezoneId string `json:"timezone_id,omitempty"`
@ -288,5 +295,6 @@ type ResourcePipeline struct {
LatestUpdates []ResourcePipelineLatestUpdates `json:"latest_updates,omitempty"`
Library []ResourcePipelineLibrary `json:"library,omitempty"`
Notification []ResourcePipelineNotification `json:"notification,omitempty"`
RestartWindow *ResourcePipelineRestartWindow `json:"restart_window,omitempty"`
Trigger *ResourcePipelineTrigger `json:"trigger,omitempty"`
}
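The restart_window block added above is also exposed through bundle configuration (see the pipelines.RestartWindow definition later in this diff). A minimal sketch of setting it on a pipeline in databricks.yml, assuming a hypothetical pipeline key my_pipeline; the values are illustrative:
resources:
  pipelines:
    my_pipeline:
      restart_window:
        start_hour: 5          # required: hour of day (0-23) at which the five-hour restart window starts
        days_of_week: MONDAY   # optional: MONDAY..SUNDAY; all days of the week if omitted
        time_zone_id: UTC      # optional: defaults to UTC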

View File

@ -0,0 +1,84 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.
package schema
type ResourceQueryParameterDateRangeValueDateRangeValue struct {
End string `json:"end"`
Start string `json:"start"`
}
type ResourceQueryParameterDateRangeValue struct {
DynamicDateRangeValue string `json:"dynamic_date_range_value,omitempty"`
Precision string `json:"precision,omitempty"`
StartDayOfWeek int `json:"start_day_of_week,omitempty"`
DateRangeValue *ResourceQueryParameterDateRangeValueDateRangeValue `json:"date_range_value,omitempty"`
}
type ResourceQueryParameterDateValue struct {
DateValue string `json:"date_value,omitempty"`
DynamicDateValue string `json:"dynamic_date_value,omitempty"`
Precision string `json:"precision,omitempty"`
}
type ResourceQueryParameterEnumValueMultiValuesOptions struct {
Prefix string `json:"prefix,omitempty"`
Separator string `json:"separator,omitempty"`
Suffix string `json:"suffix,omitempty"`
}
type ResourceQueryParameterEnumValue struct {
EnumOptions string `json:"enum_options,omitempty"`
Values []string `json:"values,omitempty"`
MultiValuesOptions *ResourceQueryParameterEnumValueMultiValuesOptions `json:"multi_values_options,omitempty"`
}
type ResourceQueryParameterNumericValue struct {
Value int `json:"value"`
}
type ResourceQueryParameterQueryBackedValueMultiValuesOptions struct {
Prefix string `json:"prefix,omitempty"`
Separator string `json:"separator,omitempty"`
Suffix string `json:"suffix,omitempty"`
}
type ResourceQueryParameterQueryBackedValue struct {
QueryId string `json:"query_id"`
Values []string `json:"values,omitempty"`
MultiValuesOptions *ResourceQueryParameterQueryBackedValueMultiValuesOptions `json:"multi_values_options,omitempty"`
}
type ResourceQueryParameterTextValue struct {
Value string `json:"value"`
}
type ResourceQueryParameter struct {
Name string `json:"name"`
Title string `json:"title,omitempty"`
DateRangeValue *ResourceQueryParameterDateRangeValue `json:"date_range_value,omitempty"`
DateValue *ResourceQueryParameterDateValue `json:"date_value,omitempty"`
EnumValue *ResourceQueryParameterEnumValue `json:"enum_value,omitempty"`
NumericValue *ResourceQueryParameterNumericValue `json:"numeric_value,omitempty"`
QueryBackedValue *ResourceQueryParameterQueryBackedValue `json:"query_backed_value,omitempty"`
TextValue *ResourceQueryParameterTextValue `json:"text_value,omitempty"`
}
type ResourceQuery struct {
ApplyAutoLimit bool `json:"apply_auto_limit,omitempty"`
Catalog string `json:"catalog,omitempty"`
CreateTime string `json:"create_time,omitempty"`
Description string `json:"description,omitempty"`
DisplayName string `json:"display_name"`
Id string `json:"id,omitempty"`
LastModifierUserName string `json:"last_modifier_user_name,omitempty"`
LifecycleState string `json:"lifecycle_state,omitempty"`
OwnerUserName string `json:"owner_user_name,omitempty"`
ParentPath string `json:"parent_path,omitempty"`
QueryText string `json:"query_text"`
RunAsMode string `json:"run_as_mode,omitempty"`
Schema string `json:"schema,omitempty"`
Tags []string `json:"tags,omitempty"`
UpdateTime string `json:"update_time,omitempty"`
WarehouseId string `json:"warehouse_id"`
Parameter []ResourceQueryParameter `json:"parameter,omitempty"`
}

View File

@ -4,6 +4,7 @@ package schema
type Resources struct {
AccessControlRuleSet map[string]any `json:"databricks_access_control_rule_set,omitempty"`
Alert map[string]any `json:"databricks_alert,omitempty"`
ArtifactAllowlist map[string]any `json:"databricks_artifact_allowlist,omitempty"`
AutomaticClusterUpdateWorkspaceSetting map[string]any `json:"databricks_automatic_cluster_update_workspace_setting,omitempty"`
AwsS3Mount map[string]any `json:"databricks_aws_s3_mount,omitempty"`
@ -17,6 +18,7 @@ type Resources struct {
ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"`
ComplianceSecurityProfileWorkspaceSetting map[string]any `json:"databricks_compliance_security_profile_workspace_setting,omitempty"`
Connection map[string]any `json:"databricks_connection,omitempty"`
CustomAppIntegration map[string]any `json:"databricks_custom_app_integration,omitempty"`
Dashboard map[string]any `json:"databricks_dashboard,omitempty"`
DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"`
DefaultNamespaceSetting map[string]any `json:"databricks_default_namespace_setting,omitempty"`
@ -68,6 +70,7 @@ type Resources struct {
Pipeline map[string]any `json:"databricks_pipeline,omitempty"`
Provider map[string]any `json:"databricks_provider,omitempty"`
QualityMonitor map[string]any `json:"databricks_quality_monitor,omitempty"`
Query map[string]any `json:"databricks_query,omitempty"`
Recipient map[string]any `json:"databricks_recipient,omitempty"`
RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"`
Repo map[string]any `json:"databricks_repo,omitempty"`
@ -107,6 +110,7 @@ type Resources struct {
func NewResources() *Resources {
return &Resources{
AccessControlRuleSet: make(map[string]any),
Alert: make(map[string]any),
ArtifactAllowlist: make(map[string]any),
AutomaticClusterUpdateWorkspaceSetting: make(map[string]any),
AwsS3Mount: make(map[string]any),
@ -120,6 +124,7 @@ func NewResources() *Resources {
ClusterPolicy: make(map[string]any),
ComplianceSecurityProfileWorkspaceSetting: make(map[string]any),
Connection: make(map[string]any),
CustomAppIntegration: make(map[string]any),
Dashboard: make(map[string]any),
DbfsFile: make(map[string]any),
DefaultNamespaceSetting: make(map[string]any),
@ -171,6 +176,7 @@ func NewResources() *Resources {
Pipeline: make(map[string]any),
Provider: make(map[string]any),
QualityMonitor: make(map[string]any),
Query: make(map[string]any),
Recipient: make(map[string]any),
RegisteredModel: make(map[string]any),
Repo: make(map[string]any),

View File

@ -21,7 +21,7 @@ type Root struct {
const ProviderHost = "registry.terraform.io"
const ProviderSource = "databricks/databricks"
const ProviderVersion = "1.54.0"
const ProviderVersion = "1.58.0"
func NewRoot() *Root {
return &Root{

View File

@ -519,6 +519,10 @@ func TestRenderSummary(t *testing.T) {
URL: "https://url2",
JobSettings: &jobs.JobSettings{Name: "job2-name"},
},
"job3": {
ID: "3",
URL: "https://url3", // This emulates deleted job
},
},
Pipelines: map[string]*resources.Pipeline{
"pipeline2": {

View File

@ -6,6 +6,8 @@ import (
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/pipelines"
"github.com/stretchr/testify/assert"
)
@ -14,11 +16,17 @@ func TestCompletions_SkipDuplicates(t *testing.T) {
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {},
"bar": {},
"foo": {
JobSettings: &jobs.JobSettings{},
},
"bar": {
JobSettings: &jobs.JobSettings{},
},
},
Pipelines: map[string]*resources.Pipeline{
"foo": {},
"foo": {
PipelineSpec: &pipelines.PipelineSpec{},
},
},
},
},
@ -36,10 +44,14 @@ func TestCompletions_Filter(t *testing.T) {
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {},
"foo": {
JobSettings: &jobs.JobSettings{},
},
},
Pipelines: map[string]*resources.Pipeline{
"bar": {},
"bar": {
PipelineSpec: &pipelines.PipelineSpec{},
},
},
},
},

View File

@ -7,6 +7,7 @@ import (
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/pipelines"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -28,8 +29,12 @@ func TestLookup_NotFound(t *testing.T) {
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {},
"bar": {},
"foo": {
JobSettings: &jobs.JobSettings{},
},
"bar": {
JobSettings: &jobs.JobSettings{},
},
},
},
},
@ -45,10 +50,14 @@ func TestLookup_MultipleFound(t *testing.T) {
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {},
"foo": {
JobSettings: &jobs.JobSettings{},
},
},
Pipelines: map[string]*resources.Pipeline{
"foo": {},
"foo": {
PipelineSpec: &pipelines.PipelineSpec{},
},
},
},
},
@ -92,10 +101,14 @@ func TestLookup_NominalWithFilters(t *testing.T) {
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"foo": {},
"foo": {
JobSettings: &jobs.JobSettings{},
},
},
Pipelines: map[string]*resources.Pipeline{
"bar": {},
"bar": {
PipelineSpec: &pipelines.PipelineSpec{},
},
},
},
},

View File

@ -59,9 +59,14 @@ func TestJsonSchema(t *testing.T) {
}
// Assert enum values are loaded
schedule := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "catalog.MonitorCronSchedule")
assert.Contains(t, schedule.AnyOf[0].Properties["pause_status"].Enum, "PAUSED")
assert.Contains(t, schedule.AnyOf[0].Properties["pause_status"].Enum, "UNPAUSED")
schedule := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "pipelines.RestartWindow")
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "MONDAY")
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "TUESDAY")
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "WEDNESDAY")
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "THURSDAY")
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "FRIDAY")
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "SATURDAY")
assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "SUNDAY")
providers := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "jobs.GitProvider")
assert.Contains(t, providers.Enum, "gitHub")

View File

@ -185,6 +185,14 @@
{
"type": "object",
"properties": {
"create_time": {
"description": "The timestamp of when the dashboard was created.",
"$ref": "#/$defs/string"
},
"dashboard_id": {
"description": "UUID identifying the dashboard.",
"$ref": "#/$defs/string"
},
"display_name": {
"description": "The display name of the dashboard.",
"$ref": "#/$defs/string"
@ -192,13 +200,25 @@
"embed_credentials": {
"$ref": "#/$defs/bool"
},
"etag": {
"description": "The etag for the dashboard. Can be optionally provided on updates to ensure that the dashboard\nhas not been modified since the last read.\nThis field is excluded in List Dashboards responses.",
"$ref": "#/$defs/string"
},
"file_path": {
"$ref": "#/$defs/string"
},
"lifecycle_state": {
"description": "The state of the dashboard resource. Used for tracking trashed status.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/dashboards.LifecycleState"
},
"parent_path": {
"description": "The workspace path of the folder containing the dashboard. Includes leading slash and no\ntrailing slash.\nThis field is excluded in List Dashboards responses.",
"$ref": "#/$defs/string"
},
"path": {
"description": "The workspace path of the dashboard asset, including the file name.\nExported dashboards always have the file extension `.lvdash.json`.\nThis field is excluded in List Dashboards responses.",
"$ref": "#/$defs/string"
},
"permissions": {
"$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission"
},
@ -206,15 +226,16 @@
"description": "The contents of the dashboard in serialized string form.\nThis field is excluded in List Dashboards responses.\nUse the [get dashboard API](https://docs.databricks.com/api/workspace/lakeview/get)\nto retrieve an example response, which includes the `serialized_dashboard` field.\nThis field provides the structure of the JSON string that represents the dashboard's\nlayout and components.",
"$ref": "#/$defs/interface"
},
"update_time": {
"description": "The timestamp of when the dashboard was last updated by the user.\nThis field is excluded in List Dashboards responses.",
"$ref": "#/$defs/string"
},
"warehouse_id": {
"description": "The warehouse ID used to run the dashboard.",
"$ref": "#/$defs/string"
}
},
"additionalProperties": false,
"required": [
"display_name"
]
"additionalProperties": false
},
{
"type": "string",
@ -551,7 +572,7 @@
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.Filters"
},
"gateway_definition": {
"description": "The definition of a gateway pipeline to support CDC.",
"description": "The definition of a gateway pipeline to support change data capture.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.IngestionGatewayPipelineDefinition"
},
"id": {
@ -581,6 +602,10 @@
"description": "Whether Photon is enabled for this pipeline.",
"$ref": "#/$defs/bool"
},
"restart_window": {
"description": "Restart window of this pipeline.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindow"
},
"schema": {
"description": "The default schema (database) where tables are read from or published to. The presence of this field implies that the pipeline is in direct publishing mode.",
"$ref": "#/$defs/string"
@ -659,6 +684,9 @@
"description": "Configuration for monitoring snapshot tables.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorSnapshot"
},
"table_name": {
"$ref": "#/$defs/string"
},
"time_series": {
"description": "Configuration for monitoring time series tables.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorTimeSeries"
@ -670,6 +698,7 @@
},
"additionalProperties": false,
"required": [
"table_name",
"assets_dir",
"output_schema_name"
]
@ -788,6 +817,9 @@
"metastore": {
"$ref": "#/$defs/string"
},
"notification_destination": {
"$ref": "#/$defs/string"
},
"pipeline": {
"$ref": "#/$defs/string"
},
@ -1050,6 +1082,9 @@
"pipelines_development": {
"$ref": "#/$defs/bool"
},
"source_linked_deployment": {
"$ref": "#/$defs/bool"
},
"tags": {
"$ref": "#/$defs/map/string"
},
@ -1289,11 +1324,7 @@
"properties": {
"pause_status": {
"description": "Read only field that indicates whether a schedule is paused or not.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedulePauseStatus",
"enum": [
"UNPAUSED",
"PAUSED"
]
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedulePauseStatus"
},
"quartz_cron_expression": {
"description": "The expression that determines when to run the monitor. See [examples](https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html).\n",
@ -1317,7 +1348,12 @@
]
},
"catalog.MonitorCronSchedulePauseStatus": {
"type": "string"
"type": "string",
"description": "Read only field that indicates whether a schedule is paused or not.",
"enum": [
"UNPAUSED",
"PAUSED"
]
},
"catalog.MonitorDataClassificationConfig": {
"anyOf": [
@ -1382,11 +1418,7 @@
},
"problem_type": {
"description": "Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLogProblemType",
"enum": [
"PROBLEM_TYPE_CLASSIFICATION",
"PROBLEM_TYPE_REGRESSION"
]
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLogProblemType"
},
"timestamp_col": {
"description": "Column that contains the timestamps of requests. The column must be one of the following:\n- A ``TimestampType`` column\n- A column whose values can be converted to timestamps through the pyspark\n ``to_timestamp`` [function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html).\n",
@ -1409,7 +1441,12 @@
]
},
"catalog.MonitorInferenceLogProblemType": {
"type": "string"
"type": "string",
"description": "Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed.",
"enum": [
"PROBLEM_TYPE_CLASSIFICATION",
"PROBLEM_TYPE_REGRESSION"
]
},
"catalog.MonitorMetric": {
"anyOf": [
@ -1434,12 +1471,7 @@
},
"type": {
"description": "Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``.\nThe ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics\nare computed on a single table, whereas the ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics\n",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetricType",
"enum": [
"CUSTOM_METRIC_TYPE_AGGREGATE",
"CUSTOM_METRIC_TYPE_DERIVED",
"CUSTOM_METRIC_TYPE_DRIFT"
]
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetricType"
}
},
"additionalProperties": false,
@ -1458,7 +1490,13 @@
]
},
"catalog.MonitorMetricType": {
"type": "string"
"type": "string",
"description": "Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``.\nThe ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics\nare computed on a single table, whereas the ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics\n",
"enum": [
"CUSTOM_METRIC_TYPE_AGGREGATE",
"CUSTOM_METRIC_TYPE_DERIVED",
"CUSTOM_METRIC_TYPE_DRIFT"
]
},
"catalog.MonitorNotifications": {
"anyOf": [
@ -2325,6 +2363,13 @@
}
]
},
"dashboards.LifecycleState": {
"type": "string",
"enum": [
"ACTIVE",
"TRASHED"
]
},
"jobs.Condition": {
"type": "string",
"enum": [
@ -2785,7 +2830,7 @@
"anyOf": [
{
"type": "object",
"description": "Write-only setting. Specifies the user, service principal or group that the job/pipeline runs as. If not specified, the job/pipeline runs as the user who created the job/pipeline.\n\nExactly one of `user_name`, `service_principal_name`, `group_name` should be specified. If not, an error is thrown.",
"description": "Write-only setting. Specifies the user, service principal or group that the job/pipeline runs as. If not specified, the job/pipeline runs as the user who created the job/pipeline.\n\nEither `user_name` or `service_principal_name` should be specified. If not, an error is thrown.",
"properties": {
"service_principal_name": {
"description": "Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role.",
@ -3102,7 +3147,7 @@
"$ref": "#/$defs/slice/string"
},
"jar_params": {
"description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](/jobs.html\\\"#parameter-variables\\\") to set parameters containing information about job runs.",
"description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.",
"$ref": "#/$defs/slice/string"
},
"job_id": {
@ -3436,11 +3481,11 @@
"type": "object",
"properties": {
"condition_task": {
"description": "If condition_task, specifies a condition with an outcome that can be used to control the execution of other tasks. Does not require a cluster to execute and does not support retries or notifications.",
"description": "The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present.\nThe condition task does not require a cluster to execute and does not support retries or notifications.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ConditionTask"
},
"dbt_task": {
"description": "If dbt_task, indicates that this must execute a dbt task. It requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.",
"description": "The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.DbtTask"
},
"depends_on": {
@ -3468,7 +3513,7 @@
"$ref": "#/$defs/string"
},
"for_each_task": {
"description": "If for_each_task, indicates that this task must execute the nested task within it.",
"description": "The task executes a nested task for every input provided when the `for_each_task` field is present.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ForEachTask"
},
"health": {
@ -3495,7 +3540,7 @@
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec"
},
"notebook_task": {
"description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.",
"description": "The task runs a notebook when the `notebook_task` field is present.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.NotebookTask"
},
"notification_settings": {
@ -3503,11 +3548,11 @@
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TaskNotificationSettings"
},
"pipeline_task": {
"description": "If pipeline_task, indicates that this task must execute a Pipeline.",
"description": "The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines configured to use triggered more are supported.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.PipelineTask"
},
"python_wheel_task": {
"description": "If python_wheel_task, indicates that this job must execute a PythonWheel.",
"description": "The task runs a Python wheel when the `python_wheel_task` field is present.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.PythonWheelTask"
},
"retry_on_timeout": {
@ -3519,23 +3564,23 @@
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.RunIf"
},
"run_job_task": {
"description": "If run_job_task, indicates that this task must execute another job.",
"description": "The task triggers another job when the `run_job_task` field is present.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask"
},
"spark_jar_task": {
"description": "If spark_jar_task, indicates that this task must run a JAR.",
"description": "The task runs a JAR when the `spark_jar_task` field is present.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkJarTask"
},
"spark_python_task": {
"description": "If spark_python_task, indicates that this task must run a Python file.",
"description": "The task runs a Python file when the `spark_python_task` field is present.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkPythonTask"
},
"spark_submit_task": {
"description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.",
"description": "(Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkSubmitTask"
},
"sql_task": {
"description": "If sql_task, indicates that this job must execute a SQL task.",
"description": "The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when the `sql_task` field is present.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SqlTask"
},
"task_key": {
@ -3821,12 +3866,7 @@
},
"status": {
"description": "Current status of `model_version`",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/ml.ModelVersionStatus",
"enum": [
"PENDING_REGISTRATION",
"FAILED_REGISTRATION",
"READY"
]
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/ml.ModelVersionStatus"
},
"status_message": {
"description": "Details on current `status`, if it is pending or failed.",
@ -3854,7 +3894,13 @@
]
},
"ml.ModelVersionStatus": {
"type": "string"
"type": "string",
"description": "Current status of `model_version`",
"enum": [
"PENDING_REGISTRATION",
"FAILED_REGISTRATION",
"READY"
]
},
"ml.ModelVersionTag": {
"anyOf": [
@ -3951,15 +3997,15 @@
"type": "object",
"properties": {
"report": {
"description": "Select tables from a specific source report.",
"description": "Select a specific source report.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.ReportSpec"
},
"schema": {
"description": "Select tables from a specific source schema.",
"description": "Select all tables from a specific source schema.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.SchemaSpec"
},
"table": {
"description": "Select tables from a specific source table.",
"description": "Select a specific source table.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpec"
}
},
@ -3977,7 +4023,11 @@
"type": "object",
"properties": {
"connection_id": {
"description": "Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.",
"description": "[Deprecated, use connection_name instead] Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source.",
"$ref": "#/$defs/string"
},
"connection_name": {
"description": "Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source.",
"$ref": "#/$defs/string"
},
"gateway_storage_catalog": {
@ -4007,11 +4057,11 @@
"type": "object",
"properties": {
"connection_name": {
"description": "Immutable. The Unity Catalog connection this ingestion pipeline uses to communicate with the source. Specify either ingestion_gateway_id or connection_name.",
"description": "Immutable. The Unity Catalog connection that this ingestion pipeline uses to communicate with the source. This is used with connectors for applications like Salesforce, Workday, and so on.",
"$ref": "#/$defs/string"
},
"ingestion_gateway_id": {
"description": "Immutable. Identifier for the ingestion gateway used by this ingestion pipeline to communicate with the source. Specify either ingestion_gateway_id or connection_name.",
"description": "Immutable. Identifier for the gateway that is used by this ingestion pipeline to communicate with the source database. This is used with connectors to databases like SQL Server.",
"$ref": "#/$defs/string"
},
"objects": {
@ -4188,11 +4238,7 @@
},
"mode": {
"description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscaleMode",
"enum": [
"ENHANCED",
"LEGACY"
]
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscaleMode"
}
},
"additionalProperties": false,
@ -4208,7 +4254,12 @@
]
},
"pipelines.PipelineClusterAutoscaleMode": {
"type": "string"
"type": "string",
"description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n",
"enum": [
"ENHANCED",
"LEGACY"
]
},
"pipelines.PipelineDeployment": {
"anyOf": [
@ -4320,6 +4371,47 @@
}
]
},
"pipelines.RestartWindow": {
"anyOf": [
{
"type": "object",
"properties": {
"days_of_week": {
"description": "Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek",
"enum": [
"MONDAY",
"TUESDAY",
"WEDNESDAY",
"THURSDAY",
"FRIDAY",
"SATURDAY",
"SUNDAY"
]
},
"start_hour": {
"description": "An integer between 0 and 23 denoting the start hour for the restart window in the 24-hour day.\nContinuous pipeline restart is triggered only within a five-hour window starting at this hour.",
"$ref": "#/$defs/int"
},
"time_zone_id": {
"description": "Time zone id of restart window. See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details.\nIf not specified, UTC will be used.",
"$ref": "#/$defs/string"
}
},
"additionalProperties": false,
"required": [
"start_hour"
]
},
{
"type": "string",
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
}
]
},
"pipelines.RestartWindowDaysOfWeek": {
"type": "string"
},
"pipelines.SchemaSpec": {
"anyOf": [
{
@ -4411,11 +4503,7 @@
},
"scd_type": {
"description": "The SCD type to use to ingest the table.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScdType",
"enum": [
"SCD_TYPE_1",
"SCD_TYPE_2"
]
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScdType"
},
"sequence_by": {
"description": "The column names specifying the logical order of events in the source data. Delta Live Tables uses this sequencing to handle change events that arrive out of order.",
@ -4431,7 +4519,12 @@
]
},
"pipelines.TableSpecificConfigScdType": {
"type": "string"
"type": "string",
"description": "The SCD type to use to ingest the table.",
"enum": [
"SCD_TYPE_1",
"SCD_TYPE_2"
]
},
"serving.Ai21LabsConfig": {
"anyOf": [
@ -4520,11 +4613,7 @@
"properties": {
"behavior": {
"description": "Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior",
"enum": [
"NONE",
"BLOCK"
]
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior"
}
},
"additionalProperties": false,
@ -4539,7 +4628,12 @@
]
},
"serving.AiGatewayGuardrailPiiBehaviorBehavior": {
"type": "string"
"type": "string",
"description": "Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.",
"enum": [
"NONE",
"BLOCK"
]
},
"serving.AiGatewayGuardrails": {
"anyOf": [
@ -4604,18 +4698,11 @@
},
"key": {
"description": "Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey",
"enum": [
"user",
"endpoint"
]
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey"
},
"renewal_period": {
"description": "Renewal period field for a rate limit. Currently, only 'minute' is supported.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod",
"enum": [
"minute"
]
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod"
}
},
"additionalProperties": false,
@ -4631,10 +4718,19 @@
]
},
"serving.AiGatewayRateLimitKey": {
"type": "string"
"type": "string",
"description": "Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
"enum": [
"user",
"endpoint"
]
},
"serving.AiGatewayRateLimitRenewalPeriod": {
"type": "string"
"type": "string",
"description": "Renewal period field for a rate limit. Currently, only 'minute' is supported.",
"enum": [
"minute"
]
},
"serving.AiGatewayUsageTrackingConfig": {
"anyOf": [
@ -4681,13 +4777,7 @@
},
"bedrock_provider": {
"description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider",
"enum": [
"anthropic",
"cohere",
"ai21labs",
"amazon"
]
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider"
}
},
"additionalProperties": false,
@ -4703,7 +4793,14 @@
]
},
"serving.AmazonBedrockConfigBedrockProvider": {
"type": "string"
"type": "string",
"description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.",
"enum": [
"anthropic",
"cohere",
"ai21labs",
"amazon"
]
},
"serving.AnthropicConfig": {
"anyOf": [
@ -4910,17 +5007,7 @@
},
"provider": {
"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider",
"enum": [
"ai21labs",
"anthropic",
"amazon-bedrock",
"cohere",
"databricks-model-serving",
"google-cloud-vertex-ai",
"openai",
"palm"
]
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider"
},
"task": {
"description": "The task type of the external model.",
@ -4941,7 +5028,18 @@
]
},
"serving.ExternalModelProvider": {
"type": "string"
"type": "string",
"description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n",
"enum": [
"ai21labs",
"anthropic",
"amazon-bedrock",
"cohere",
"databricks-model-serving",
"google-cloud-vertex-ai",
"openai",
"palm"
]
},
"serving.GoogleCloudVertexAiConfig": {
"anyOf": [
@ -5047,18 +5145,11 @@
},
"key": {
"description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey",
"enum": [
"user",
"endpoint"
]
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey"
},
"renewal_period": {
"description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod",
"enum": [
"minute"
]
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod"
}
},
"additionalProperties": false,
@ -5074,10 +5165,19 @@
]
},
"serving.RateLimitKey": {
"type": "string"
"type": "string",
"description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
"enum": [
"user",
"endpoint"
]
},
"serving.RateLimitRenewalPeriod": {
"type": "string"
"type": "string",
"description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.",
"enum": [
"minute"
]
},
"serving.Route": {
"anyOf": [
@ -5202,23 +5302,11 @@
},
"workload_size": {
"description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize",
"enum": [
"Small",
"Medium",
"Large"
]
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize"
},
"workload_type": {
"description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n",
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType",
"enum": [
"CPU",
"GPU_SMALL",
"GPU_MEDIUM",
"GPU_LARGE",
"MULTIGPU_MEDIUM"
]
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType"
}
},
"additionalProperties": false,
@ -5235,10 +5323,24 @@
]
},
"serving.ServedModelInputWorkloadSize": {
"type": "string"
"type": "string",
"description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n",
"enum": [
"Small",
"Medium",
"Large"
]
},
"serving.ServedModelInputWorkloadType": {
"type": "string"
"type": "string",
"description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n",
"enum": [
"CPU",
"GPU_SMALL",
"GPU_MEDIUM",
"GPU_LARGE",
"MULTIGPU_MEDIUM"
]
},
"serving.TrafficConfig": {
"anyOf": [

View File

@ -10,7 +10,7 @@ artifacts:
resources:
jobs:
test_job:
name: "[${bundle.environment}] My Wheel Job"
name: "[${bundle.target}] My Wheel Job"
tasks:
- task_key: TestTask
existing_cluster_id: "0717-132531-5opeqon1"

View File

@ -4,7 +4,7 @@ bundle:
resources:
jobs:
test_job:
name: "[${bundle.environment}] My Wheel Job"
name: "[${bundle.target}] My Wheel Job"
tasks:
- task_key: TestTask
existing_cluster_id: "0717-132531-5opeqon1"

View File

@ -14,7 +14,7 @@ artifacts:
resources:
jobs:
test_job:
name: "[${bundle.environment}] My Wheel Job"
name: "[${bundle.target}] My Wheel Job"
tasks:
- task_key: TestTask
existing_cluster_id: "0717-132531-5opeqon1"

View File

@ -4,7 +4,7 @@ bundle:
resources:
jobs:
test_job:
name: "[${bundle.environment}] My Wheel Job"
name: "[${bundle.target}] My Wheel Job"
tasks:
- task_key: TestTask
existing_cluster_id: "0717-aaaaa-bbbbbb"

View File

@ -7,7 +7,7 @@ workspace:
resources:
jobs:
test_job:
name: "[${bundle.environment}] My Wheel Job"
name: "[${bundle.target}] My Wheel Job"
tasks:
- task_key: TestTask
existing_cluster_id: "0717-aaaaa-bbbbbb"

View File

@ -4,7 +4,7 @@ bundle:
resources:
jobs:
test_job:
name: "[${bundle.environment}] My Wheel Job"
name: "[${bundle.target}] My Wheel Job"
tasks:
- task_key: TestTask
existing_cluster_id: "0717-aaaaa-bbbbbb"

View File

@ -4,7 +4,7 @@ bundle:
resources:
jobs:
test_job:
name: "[${bundle.environment}] My Wheel Job"
name: "[${bundle.target}] My Wheel Job"
tasks:
- task_key: TestTask
existing_cluster_id: "0717-132531-5opeqon1"

View File

@ -6,6 +6,7 @@ import (
"strings"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/libraries"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/log"
@ -22,6 +23,9 @@ func WrapperWarning() bundle.Mutator {
func (m *wrapperWarning) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
if isPythonWheelWrapperOn(b) {
if config.IsExplicitlyEnabled(b.Config.Presets.SourceLinkedDeployment) {
return diag.Warningf("Python wheel notebook wrapper is not available when using source-linked deployment mode. You can disable this mode by setting 'presets.source_linked_deployment: false'")
}
return nil
}
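The warning above tells users how to opt out of source-linked deployment when they need the Python wheel notebook wrapper. A minimal sketch of doing so in databricks.yml; the key comes directly from the warning message:
presets:
  source_linked_deployment: false  # disables source-linked deployment so the wheel wrapper can be used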

View File

@ -194,7 +194,7 @@ func newGet() *cobra.Command {
configuration are specified by ID.
Arguments:
BUDGET_ID: The Databricks budget configuration ID.`
BUDGET_ID: The budget configuration ID`
cmd.Annotations = make(map[string]string)

View File

@ -195,7 +195,10 @@ func newGet() *cobra.Command {
cmd.Short = `Get OAuth Custom App Integration.`
cmd.Long = `Get OAuth Custom App Integration.
Gets the Custom OAuth App Integration for the given integration id.`
Gets the Custom OAuth App Integration for the given integration id.
Arguments:
INTEGRATION_ID: The OAuth app integration ID.`
cmd.Annotations = make(map[string]string)

View File

@ -191,6 +191,8 @@ func newList() *cobra.Command {
// TODO: short flags
cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `An opaque page token which was the next_page_token in the response of the previous request to list the secrets for this service principal.`)
cmd.Use = "list SERVICE_PRINCIPAL_ID"
cmd.Short = `List service principal secrets.`
cmd.Long = `List service principal secrets.

View File

@ -257,7 +257,7 @@ func newUpdate() *cobra.Command {
workspace for the specified principal.
Arguments:
WORKSPACE_ID: The workspace ID for the account.
WORKSPACE_ID: The workspace ID.
PRINCIPAL_ID: The ID of the user, service principal, or group.`
cmd.Annotations = make(map[string]string)

View File

@ -81,6 +81,7 @@ func newCreate() *cobra.Command {
cmd.Flags().StringVar(&createReq.DeploymentName, "deployment-name", createReq.DeploymentName, `The deployment name defines part of the subdomain for the workspace.`)
// TODO: complex arg: gcp_managed_network_config
// TODO: complex arg: gke_config
cmd.Flags().BoolVar(&createReq.IsNoPublicIpEnabled, "is-no-public-ip-enabled", createReq.IsNoPublicIpEnabled, `Whether no public IP is enabled for the workspace.`)
cmd.Flags().StringVar(&createReq.Location, "location", createReq.Location, `The Google Cloud region of the workspace data plane in your Google account.`)
cmd.Flags().StringVar(&createReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", createReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`)
cmd.Flags().StringVar(&createReq.NetworkId, "network-id", createReq.NetworkId, ``)
@ -420,6 +421,7 @@ func newUpdate() *cobra.Command {
cmd.Flags().StringVar(&updateReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", updateReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`)
cmd.Flags().StringVar(&updateReq.NetworkConnectivityConfigId, "network-connectivity-config-id", updateReq.NetworkConnectivityConfigId, ``)
cmd.Flags().StringVar(&updateReq.NetworkId, "network-id", updateReq.NetworkId, `The ID of the workspace's network configuration object.`)
cmd.Flags().StringVar(&updateReq.PrivateAccessSettingsId, "private-access-settings-id", updateReq.PrivateAccessSettingsId, `The ID of the workspace's private access settings configuration object.`)
cmd.Flags().StringVar(&updateReq.StorageConfigurationId, "storage-configuration-id", updateReq.StorageConfigurationId, `The ID of the workspace's storage configuration object.`)
cmd.Flags().StringVar(&updateReq.StorageCustomerManagedKeyId, "storage-customer-managed-key-id", updateReq.StorageCustomerManagedKeyId, `The ID of the key configuration object for workspace storage.`)

View File

@ -6,6 +6,7 @@ import (
"os"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/databricks/cli/bundle/phases"
"github.com/databricks/cli/cmd/bundle/utils"
"github.com/databricks/cli/cmd/root"
@ -62,7 +63,12 @@ func newDestroyCommand() *cobra.Command {
diags = bundle.Apply(ctx, b, bundle.Seq(
phases.Initialize(),
phases.Build(),
// We need to resolve the artifacts variable (as we do in the build phase)
// because some of the to-be-destroyed resources might use this variable.
// Not resolving it might lead to a terraform "Reference to undeclared resource" error.
mutator.ResolveVariableReferences(
"artifacts",
),
phases.Destroy(),
))
if err := diags.Error(); err != nil {

Some files were not shown because too many files have changed in this diff.