mirror of https://github.com/databricks/cli.git
Merge remote-tracking branch 'origin/main' into dashboards
Commit 43f9155de5

@@ -11,6 +11,7 @@
   "toolchain": {
     "required": ["go"],
     "post_generate": [
+      "go test -timeout 240s -run TestConsistentDatabricksSdkVersion github.com/databricks/cli/internal/build",
       "go run ./bundle/internal/schema/*.go ./bundle/schema/jsonschema.json",
       "echo 'bundle/internal/tf/schema/\\*.go linguist-generated=true' >> ./.gitattributes",
       "echo 'go.sum linguist-generated=true' >> ./.gitattributes",

@@ -1 +1 @@
-d05898328669a3f8ab0c2ecee37db2673d3ea3f7
+0c86ea6dbd9a730c24ff0d4e509603e476955ac5

@@ -5,6 +5,7 @@ package {{(.TrimPrefix "account").SnakeName}}
 import (
 	"github.com/databricks/cli/libs/cmdio"
 	"github.com/databricks/cli/libs/flags"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/cmd/root"
 	"github.com/databricks/databricks-sdk-go/service/{{.Package.Name}}"
 	"github.com/spf13/cobra"

@@ -231,9 +232,15 @@ func new{{.PascalName}}() *cobra.Command {
 	{{- if .Request }}
 	{{ if .CanUseJson }}
 	if cmd.Flags().Changed("json") {
-		err = {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req)
-		if err != nil {
-			return err
+		diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req)
+		if diags.HasError() {
+			return diags.Error()
+		}
+		if len(diags) > 0 {
+			err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+			if err != nil {
+				return err
+			}
 		}
 	}{{end}}{{ if .MustUseJson }}else {
 		return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")

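The template change above replaces the single-error unmarshal path with a list of diagnostics. A toy, self-contained sketch of the resulting control flow follows; the `diagnostic` type here is a stand-in for the CLI's internal `diag` package, not its real API. Error-severity diagnostics still abort the command, while anything else is rendered to stderr and execution continues.

```go
package main

import (
	"fmt"
	"os"
)

// Stand-in for the CLI's diagnostics: a severity plus a message.
type diagnostic struct {
	severity string // "error" or "warning"
	summary  string
}

type diagnostics []diagnostic

// hasError reports whether any entry is fatal, mirroring diags.HasError() above.
func (ds diagnostics) hasError() bool {
	for _, d := range ds {
		if d.severity == "error" {
			return true
		}
	}
	return false
}

func main() {
	diags := diagnostics{{severity: "warning", summary: "unknown field: foo"}}

	if diags.hasError() {
		fmt.Println("aborting") // the hard-failure path: return diags.Error()
		return
	}
	if len(diags) > 0 {
		// Non-fatal diagnostics are printed to stderr, then execution continues.
		for _, d := range diags {
			fmt.Fprintf(os.Stderr, "%s: %s\n", d.severity, d.summary)
		}
	}
	fmt.Println("command continues")
}
```
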
@@ -6,6 +6,7 @@ cmd/account/cmd.go linguist-generated=true
 cmd/account/credentials/credentials.go linguist-generated=true
 cmd/account/csp-enablement-account/csp-enablement-account.go linguist-generated=true
 cmd/account/custom-app-integration/custom-app-integration.go linguist-generated=true
+cmd/account/disable-legacy-features/disable-legacy-features.go linguist-generated=true
 cmd/account/encryption-keys/encryption-keys.go linguist-generated=true
 cmd/account/esm-enablement-account/esm-enablement-account.go linguist-generated=true
 cmd/account/groups/groups.go linguist-generated=true

@@ -52,6 +53,7 @@ cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true
 cmd/workspace/dashboards/dashboards.go linguist-generated=true
 cmd/workspace/data-sources/data-sources.go linguist-generated=true
 cmd/workspace/default-namespace/default-namespace.go linguist-generated=true
+cmd/workspace/disable-legacy-access/disable-legacy-access.go linguist-generated=true
 cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go linguist-generated=true
 cmd/workspace/experiments/experiments.go linguist-generated=true
 cmd/workspace/external-locations/external-locations.go linguist-generated=true

@@ -108,6 +110,7 @@ cmd/workspace/storage-credentials/storage-credentials.go linguist-generated=true
 cmd/workspace/system-schemas/system-schemas.go linguist-generated=true
 cmd/workspace/table-constraints/table-constraints.go linguist-generated=true
 cmd/workspace/tables/tables.go linguist-generated=true
+cmd/workspace/temporary-table-credentials/temporary-table-credentials.go linguist-generated=true
 cmd/workspace/token-management/token-management.go linguist-generated=true
 cmd/workspace/tokens/tokens.go linguist-generated=true
 cmd/workspace/users/users.go linguist-generated=true

CHANGELOG.md
@@ -1,5 +1,73 @@
 # Version changelog
 
+## [Release] Release v0.230.0
+
+Notable changes for Databricks Asset Bundles:
+
+Workspace paths are automatically prefixed with `/Workspace`. In addition, all usage of path strings such as `/Workspace/${workspace.root_path}/...` in bundle configuration is automatically replaced with `${workspace.root_path}/...` and generates a warning as part of bundle validate.
+
+More details can be found here: https://docs.databricks.com/en/release-notes/dev-tools/bundles.html#workspace-paths
+
+Bundles:
+ * Add an error if state files grow bigger than the export limit ([#1795](https://github.com/databricks/cli/pull/1795)).
+ * Always prepend bundle remote paths with /Workspace ([#1724](https://github.com/databricks/cli/pull/1724)).
+ * Add resource path field to bundle workspace configuration ([#1800](https://github.com/databricks/cli/pull/1800)).
+ * Add validation for files with a `.(resource-name).yml` extension ([#1780](https://github.com/databricks/cli/pull/1780)).
+
+Internal:
+ * Remove deprecated or readonly fields from the bundle schema ([#1809](https://github.com/databricks/cli/pull/1809)).
+
+API Changes:
+ * Changed `databricks git-credentials create`, `databricks git-credentials delete`, `databricks git-credentials get`, `databricks git-credentials list`, `databricks git-credentials update` commands.
+ * Changed `databricks repos create`, `databricks repos delete`, `databricks repos get`, `databricks repos update` commands.
+
+OpenAPI commit 0c86ea6dbd9a730c24ff0d4e509603e476955ac5 (2024-10-02)
+Dependency updates:
+ * Upgrade TF provider to 1.53.0 ([#1815](https://github.com/databricks/cli/pull/1815)).
+ * Bump golang.org/x/term from 0.24.0 to 0.25.0 ([#1811](https://github.com/databricks/cli/pull/1811)).
+ * Bump golang.org/x/text from 0.18.0 to 0.19.0 ([#1812](https://github.com/databricks/cli/pull/1812)).
+ * Bump github.com/databricks/databricks-sdk-go from 0.47.0 to 0.48.0 ([#1810](https://github.com/databricks/cli/pull/1810)).
+
+## [Release] Release v0.229.0
+
+Bundles:
+ * Added support for creating all-purpose clusters ([#1698](https://github.com/databricks/cli/pull/1698)).
+ * Reduce time until the prompt is shown for bundle run ([#1727](https://github.com/databricks/cli/pull/1727)).
+ * Use Unity Catalog for pipelines in the default-python template ([#1766](https://github.com/databricks/cli/pull/1766)).
+ * Add verbose flag to the "bundle deploy" command ([#1774](https://github.com/databricks/cli/pull/1774)).
+ * Fixed full variable override detection ([#1787](https://github.com/databricks/cli/pull/1787)).
+ * Add sub-extension to resource files in built-in templates ([#1777](https://github.com/databricks/cli/pull/1777)).
+ * Fix panic in `apply_presets.go` ([#1796](https://github.com/databricks/cli/pull/1796)).
+
+Internal:
+ * Assert tokens are redacted in origin URL when username is not specified ([#1785](https://github.com/databricks/cli/pull/1785)).
+ * Refactor jobs path translation ([#1782](https://github.com/databricks/cli/pull/1782)).
+ * Add JobTaskClusterSpec validate mutator ([#1784](https://github.com/databricks/cli/pull/1784)).
+ * Pin Go toolchain to 1.22.7 ([#1790](https://github.com/databricks/cli/pull/1790)).
+ * Modify SetLocation test utility to take full locations as argument ([#1788](https://github.com/databricks/cli/pull/1788)).
+ * Simplified isFullVariableOverrideDef implementation ([#1791](https://github.com/databricks/cli/pull/1791)).
+ * Sort tasks by `task_key` before generating the Terraform configuration ([#1776](https://github.com/databricks/cli/pull/1776)).
+ * Trim trailing whitespace ([#1794](https://github.com/databricks/cli/pull/1794)).
+ * Move trampoline code into trampoline package ([#1793](https://github.com/databricks/cli/pull/1793)).
+ * Rename `RootPath` -> `BundleRootPath` ([#1792](https://github.com/databricks/cli/pull/1792)).
+
+API Changes:
+ * Changed `databricks apps delete` command to return .
+ * Changed `databricks apps deploy` command with new required argument order.
+ * Changed `databricks apps start` command to return .
+ * Changed `databricks apps stop` command to return .
+ * Added `databricks temporary-table-credentials` command group.
+ * Added `databricks serving-endpoints put-ai-gateway` command.
+ * Added `databricks disable-legacy-access` command group.
+ * Added `databricks account disable-legacy-features` command group.
+
+OpenAPI commit 6f6b1371e640f2dfeba72d365ac566368656f6b6 (2024-09-19)
+Dependency updates:
+ * Upgrade to Go SDK 0.47.0 ([#1799](https://github.com/databricks/cli/pull/1799)).
+ * Upgrade to TF provider 1.52 ([#1781](https://github.com/databricks/cli/pull/1781)).
+ * Bump golang.org/x/mod from 0.20.0 to 0.21.0 ([#1758](https://github.com/databricks/cli/pull/1758)).
+ * Bump github.com/hashicorp/hc-install from 0.7.0 to 0.9.0 ([#1772](https://github.com/databricks/cli/pull/1772)).
+
 ## [Release] Release v0.228.1
 
 Bundles:

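To make the workspace-path note above concrete, here is a minimal, runnable sketch of the two behaviors, assuming the documented rewrite amounts to a first-occurrence substring replacement (the mutators implementing both behaviors appear later in this commit):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// 1. Remote bundle paths are now placed under /Workspace.
	fmt.Println("/Workspace" + "/Users/jane@doe.com/.bundle/test/dev")

	// 2. A literal /Workspace prefix in front of a workspace path variable is
	// redundant after (1), so it is stripped with a warning during validate.
	old := "/Workspace/${workspace.root_path}/file1.py"
	fmt.Println(strings.Replace(old, "/Workspace/${workspace.root_path}", "${workspace.root_path}", 1))
}
```
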
@@ -18,7 +18,7 @@ func TestEntryPointNoRootPath(t *testing.T) {
 
 func TestEntryPoint(t *testing.T) {
 	b := &bundle.Bundle{
-		BundleRootPath: "testdata",
+		BundleRootPath: "testdata/basic",
 	}
 	diags := bundle.Apply(context.Background(), b, loader.EntryPoint())
 	require.NoError(t, diags.Error())

@@ -3,12 +3,135 @@ package loader
 import (
 	"context"
 	"fmt"
+	"slices"
+	"sort"
+	"strings"
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
 )
 
+func validateFileFormat(configRoot dyn.Value, filePath string) diag.Diagnostics {
+	for _, resourceDescription := range config.SupportedResources() {
+		singularName := resourceDescription.SingularName
+
+		for _, yamlExt := range []string{"yml", "yaml"} {
+			ext := fmt.Sprintf(".%s.%s", singularName, yamlExt)
+			if strings.HasSuffix(filePath, ext) {
+				return validateSingleResourceDefined(configRoot, ext, singularName)
+			}
+		}
+	}
+
+	return nil
+}
+
+func validateSingleResourceDefined(configRoot dyn.Value, ext, typ string) diag.Diagnostics {
+	type resource struct {
+		path  dyn.Path
+		value dyn.Value
+		typ   string
+		key   string
+	}
+
+	resources := []resource{}
+	supportedResources := config.SupportedResources()
+
+	// Gather all resources defined in the resources block.
+	_, err := dyn.MapByPattern(
+		configRoot,
+		dyn.NewPattern(dyn.Key("resources"), dyn.AnyKey(), dyn.AnyKey()),
+		func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+			// The key for the resource, e.g. "my_job" for jobs.my_job.
+			k := p[2].Key()
+			// The type of the resource, e.g. "job" for jobs.my_job.
+			typ := supportedResources[p[1].Key()].SingularName
+
+			resources = append(resources, resource{path: p, value: v, typ: typ, key: k})
+			return v, nil
+		})
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	// Gather all resources defined in a target block.
+	_, err = dyn.MapByPattern(
+		configRoot,
+		dyn.NewPattern(dyn.Key("targets"), dyn.AnyKey(), dyn.Key("resources"), dyn.AnyKey(), dyn.AnyKey()),
+		func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+			// The key for the resource, e.g. "my_job" for jobs.my_job.
+			k := p[4].Key()
+			// The type of the resource, e.g. "job" for jobs.my_job.
+			typ := supportedResources[p[3].Key()].SingularName
+
+			resources = append(resources, resource{path: p, value: v, typ: typ, key: k})
+			return v, nil
+		})
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	typeMatch := true
+	seenKeys := map[string]struct{}{}
+	for _, rr := range resources {
+		// case: The resource is not of the correct type.
+		if rr.typ != typ {
+			typeMatch = false
+			break
+		}
+
+		seenKeys[rr.key] = struct{}{}
+	}
+
+	// Format matches. There's at most one resource defined in the file.
+	// The resource is also of the correct type.
+	if typeMatch && len(seenKeys) <= 1 {
+		return nil
+	}
+
+	detail := strings.Builder{}
+	detail.WriteString("The following resources are defined or configured in this file:\n")
+	lines := []string{}
+	for _, r := range resources {
+		lines = append(lines, fmt.Sprintf(" - %s (%s)\n", r.key, r.typ))
+	}
+	// Sort the lines to print to make the output deterministic.
+	sort.Strings(lines)
+	// Compact the lines before writing them to the message to remove any duplicate lines.
+	// This is needed because we do not dedup earlier when gathering the resources
+	// and it's valid to define the same resource in both the resources and targets block.
+	lines = slices.Compact(lines)
+	for _, l := range lines {
+		detail.WriteString(l)
+	}
+
+	locations := []dyn.Location{}
+	paths := []dyn.Path{}
+	for _, rr := range resources {
+		locations = append(locations, rr.value.Locations()...)
+		paths = append(paths, rr.path)
+	}
+	// Sort the locations and paths to make the output deterministic.
+	sort.Slice(locations, func(i, j int) bool {
+		return locations[i].String() < locations[j].String()
+	})
+	sort.Slice(paths, func(i, j int) bool {
+		return paths[i].String() < paths[j].String()
+	})
+
+	return diag.Diagnostics{
+		{
+			Severity:  diag.Recommendation,
+			Summary:   fmt.Sprintf("define a single %s in a file with the %s extension.", strings.ReplaceAll(typ, "_", " "), ext),
+			Detail:    detail.String(),
+			Locations: locations,
+			Paths:     paths,
+		},
+	}
+}
+
 type processInclude struct {
 	fullPath string
 	relPath  string

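A standalone sketch of the extension matching that drives `validateFileFormat` above: a file only participates in the single-resource validation when its name ends in `.<singular resource name>.yml` or `.<singular resource name>.yaml`. The two resource names below are a hypothetical subset of the supported set, for illustration only.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, filePath := range []string{"my.job.yml", "my.pipeline.yaml", "plain.yml"} {
		matched := ""
		for _, singularName := range []string{"job", "pipeline"} { // illustrative subset
			for _, yamlExt := range []string{"yml", "yaml"} {
				// Same suffix construction as the mutator above.
				ext := fmt.Sprintf(".%s.%s", singularName, yamlExt)
				if strings.HasSuffix(filePath, ext) {
					matched = ext
				}
			}
		}
		// plain.yml matches nothing, so it is exempt from the check.
		fmt.Printf("%s -> %q\n", filePath, matched)
	}
}
```
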
@@ -31,6 +154,13 @@ func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
 	if diags.HasError() {
 		return diags
 	}
+
+	// Add any diagnostics associated with the file format.
+	diags = append(diags, validateFileFormat(this.Value(), m.relPath)...)
+	if diags.HasError() {
+		return diags
+	}
+
 	err := b.Config.Merge(this)
 	if err != nil {
 		diags = diags.Extend(diag.FromErr(err))

@@ -8,13 +8,15 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/loader"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
 func TestProcessInclude(t *testing.T) {
 	b := &bundle.Bundle{
-		BundleRootPath: "testdata",
+		BundleRootPath: "testdata/basic",
 		Config: config.Root{
 			Workspace: config.Workspace{
 				Host: "foo",

@@ -33,3 +35,184 @@ func TestProcessInclude(t *testing.T) {
 	require.NoError(t, diags.Error())
 	assert.Equal(t, "bar", b.Config.Workspace.Host)
 }
+
+func TestProcessIncludeFormatMatch(t *testing.T) {
+	for _, fileName := range []string{
+		"one_job.job.yml",
+		"one_pipeline.pipeline.yaml",
+		"two_job.yml",
+		"job_and_pipeline.yml",
+		"multiple_resources.yml",
+	} {
+		t.Run(fileName, func(t *testing.T) {
+			b := &bundle.Bundle{
+				BundleRootPath: "testdata/format_match",
+				Config: config.Root{
+					Bundle: config.Bundle{
+						Name: "format_test",
+					},
+				},
+			}
+
+			m := loader.ProcessInclude(filepath.Join(b.BundleRootPath, fileName), fileName)
+			diags := bundle.Apply(context.Background(), b, m)
+			assert.Empty(t, diags)
+		})
+	}
+}
+
+func TestProcessIncludeFormatNotMatch(t *testing.T) {
+	for fileName, expectedDiags := range map[string]diag.Diagnostics{
+		"single_job.pipeline.yaml": {
+			{
+				Severity: diag.Recommendation,
+				Summary:  "define a single pipeline in a file with the .pipeline.yaml extension.",
+				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n",
+				Locations: []dyn.Location{
+					{File: filepath.FromSlash("testdata/format_not_match/single_job.pipeline.yaml"), Line: 11, Column: 11},
+					{File: filepath.FromSlash("testdata/format_not_match/single_job.pipeline.yaml"), Line: 4, Column: 7},
+				},
+				Paths: []dyn.Path{
+					dyn.MustPathFromString("resources.jobs.job1"),
+					dyn.MustPathFromString("targets.target1.resources.jobs.job1"),
+				},
+			},
+		},
+		"job_and_pipeline.job.yml": {
+			{
+				Severity: diag.Recommendation,
+				Summary:  "define a single job in a file with the .job.yml extension.",
+				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n - pipeline1 (pipeline)\n",
+				Locations: []dyn.Location{
+					{File: filepath.FromSlash("testdata/format_not_match/job_and_pipeline.job.yml"), Line: 11, Column: 11},
+					{File: filepath.FromSlash("testdata/format_not_match/job_and_pipeline.job.yml"), Line: 4, Column: 7},
+				},
+				Paths: []dyn.Path{
+					dyn.MustPathFromString("resources.pipelines.pipeline1"),
+					dyn.MustPathFromString("targets.target1.resources.jobs.job1"),
+				},
+			},
+		},
+		"job_and_pipeline.experiment.yml": {
+			{
+				Severity: diag.Recommendation,
+				Summary:  "define a single experiment in a file with the .experiment.yml extension.",
+				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n - pipeline1 (pipeline)\n",
+				Locations: []dyn.Location{
+					{File: filepath.FromSlash("testdata/format_not_match/job_and_pipeline.experiment.yml"), Line: 11, Column: 11},
+					{File: filepath.FromSlash("testdata/format_not_match/job_and_pipeline.experiment.yml"), Line: 4, Column: 7},
+				},
+				Paths: []dyn.Path{
+					dyn.MustPathFromString("resources.pipelines.pipeline1"),
+					dyn.MustPathFromString("targets.target1.resources.jobs.job1"),
+				},
+			},
+		},
+		"two_jobs.job.yml": {
+			{
+				Severity: diag.Recommendation,
+				Summary:  "define a single job in a file with the .job.yml extension.",
+				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n - job2 (job)\n",
+				Locations: []dyn.Location{
+					{File: filepath.FromSlash("testdata/format_not_match/two_jobs.job.yml"), Line: 4, Column: 7},
+					{File: filepath.FromSlash("testdata/format_not_match/two_jobs.job.yml"), Line: 7, Column: 7},
+				},
+				Paths: []dyn.Path{
+					dyn.MustPathFromString("resources.jobs.job1"),
+					dyn.MustPathFromString("resources.jobs.job2"),
+				},
+			},
+		},
+		"second_job_in_target.job.yml": {
+			{
+				Severity: diag.Recommendation,
+				Summary:  "define a single job in a file with the .job.yml extension.",
+				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n - job2 (job)\n",
+				Locations: []dyn.Location{
+					{File: filepath.FromSlash("testdata/format_not_match/second_job_in_target.job.yml"), Line: 11, Column: 11},
+					{File: filepath.FromSlash("testdata/format_not_match/second_job_in_target.job.yml"), Line: 4, Column: 7},
+				},
+				Paths: []dyn.Path{
+					dyn.MustPathFromString("resources.jobs.job1"),
+					dyn.MustPathFromString("targets.target1.resources.jobs.job2"),
+				},
+			},
+		},
+		"two_jobs_in_target.job.yml": {
+			{
+				Severity: diag.Recommendation,
+				Summary:  "define a single job in a file with the .job.yml extension.",
+				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n - job2 (job)\n",
+				Locations: []dyn.Location{
+					{File: filepath.FromSlash("testdata/format_not_match/two_jobs_in_target.job.yml"), Line: 6, Column: 11},
+					{File: filepath.FromSlash("testdata/format_not_match/two_jobs_in_target.job.yml"), Line: 8, Column: 11},
+				},
+				Paths: []dyn.Path{
+					dyn.MustPathFromString("targets.target1.resources.jobs.job1"),
+					dyn.MustPathFromString("targets.target1.resources.jobs.job2"),
+				},
+			},
+		},
+		"multiple_resources.model_serving_endpoint.yml": {
+			{
+				Severity: diag.Recommendation,
+				Summary:  "define a single model serving endpoint in a file with the .model_serving_endpoint.yml extension.",
+				Detail: `The following resources are defined or configured in this file:
+ - experiment1 (experiment)
+ - job1 (job)
+ - job2 (job)
+ - job3 (job)
+ - model1 (model)
+ - model_serving_endpoint1 (model_serving_endpoint)
+ - pipeline1 (pipeline)
+ - pipeline2 (pipeline)
+ - quality_monitor1 (quality_monitor)
+ - registered_model1 (registered_model)
+ - schema1 (schema)
+`,
+				Locations: []dyn.Location{
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 12, Column: 7},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 14, Column: 7},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 18, Column: 7},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 22, Column: 7},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 24, Column: 7},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 28, Column: 7},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 35, Column: 11},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 39, Column: 11},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 43, Column: 11},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 4, Column: 7},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 8, Column: 7},
+				},
+				Paths: []dyn.Path{
+					dyn.MustPathFromString("resources.experiments.experiment1"),
+					dyn.MustPathFromString("resources.jobs.job1"),
+					dyn.MustPathFromString("resources.jobs.job2"),
+					dyn.MustPathFromString("resources.model_serving_endpoints.model_serving_endpoint1"),
+					dyn.MustPathFromString("resources.models.model1"),
+					dyn.MustPathFromString("resources.pipelines.pipeline1"),
+					dyn.MustPathFromString("resources.pipelines.pipeline2"),
+					dyn.MustPathFromString("resources.schemas.schema1"),
+					dyn.MustPathFromString("targets.target1.resources.jobs.job3"),
+					dyn.MustPathFromString("targets.target1.resources.quality_monitors.quality_monitor1"),
+					dyn.MustPathFromString("targets.target1.resources.registered_models.registered_model1"),
+				},
+			},
+		},
+	} {
+		t.Run(fileName, func(t *testing.T) {
+			b := &bundle.Bundle{
+				BundleRootPath: "testdata/format_not_match",
+				Config: config.Root{
+					Bundle: config.Bundle{
+						Name: "format_test",
+					},
+				},
+			}
+
+			m := loader.ProcessInclude(filepath.Join(b.BundleRootPath, fileName), fileName)
+			diags := bundle.Apply(context.Background(), b, m)
+			require.Len(t, diags, 1)
+			assert.Equal(t, expectedDiags, diags)
+		})
+	}
+}

@@ -0,0 +1,11 @@
+resources:
+  pipelines:
+    pipeline1:
+      name: pipeline1
+
+targets:
+  target1:
+    resources:
+      jobs:
+        job1:
+          name: job1

@@ -0,0 +1,43 @@
+resources:
+  experiments:
+    experiment1:
+      name: experiment1
+
+  model_serving_endpoints:
+    model_serving_endpoint1:
+      name: model_serving_endpoint1
+
+  jobs:
+    job1:
+      name: job1
+    job2:
+      name: job2
+
+  models:
+    model1:
+      name: model1
+
+  pipelines:
+    pipeline1:
+      name: pipeline1
+    pipeline2:
+      name: pipeline2
+
+  schemas:
+    schema1:
+      name: schema1
+
+targets:
+  target1:
+    resources:
+      quality_monitors:
+        quality_monitor1:
+          baseline_table_name: quality_monitor1
+
+      jobs:
+        job3:
+          name: job3
+
+      registered_models:
+        registered_model1:
+          name: registered_model1

@@ -0,0 +1,11 @@
+resources:
+  jobs:
+    job1:
+      name: job1
+
+targets:
+  target1:
+    resources:
+      jobs:
+        job1:
+          description: job1

@@ -0,0 +1,4 @@
+resources:
+  pipelines:
+    pipeline1:
+      name: pipeline1

@@ -0,0 +1,7 @@
+resources:
+  jobs:
+    job1:
+      name: job1
+
+    job2:
+      name: job2

New file: bundle/config/loader/testdata/format_not_match/job_and_pipeline.experiment.yml
@@ -0,0 +1,11 @@
+resources:
+  pipelines:
+    pipeline1:
+      name: pipeline1
+
+targets:
+  target1:
+    resources:
+      jobs:
+        job1:
+          name: job1

@@ -0,0 +1,11 @@
+resources:
+  pipelines:
+    pipeline1:
+      name: pipeline1
+
+targets:
+  target1:
+    resources:
+      jobs:
+        job1:
+          name: job1

New file: bundle/config/loader/testdata/format_not_match/multiple_resources.model_serving_endpoint.yml
@@ -0,0 +1,43 @@
+resources:
+  experiments:
+    experiment1:
+      name: experiment1
+
+  model_serving_endpoints:
+    model_serving_endpoint1:
+      name: model_serving_endpoint1
+
+  jobs:
+    job1:
+      name: job1
+    job2:
+      name: job2
+
+  models:
+    model1:
+      name: model1
+
+  pipelines:
+    pipeline1:
+      name: pipeline1
+    pipeline2:
+      name: pipeline2
+
+  schemas:
+    schema1:
+      name: schema1
+
+targets:
+  target1:
+    resources:
+      quality_monitors:
+        quality_monitor1:
+          baseline_table_name: quality_monitor1
+
+      jobs:
+        job3:
+          name: job3
+
+      registered_models:
+        registered_model1:
+          name: registered_model1

|
@ -0,0 +1,11 @@
|
||||||
|
resources:
|
||||||
|
jobs:
|
||||||
|
job1:
|
||||||
|
name: job1
|
||||||
|
|
||||||
|
targets:
|
||||||
|
target1:
|
||||||
|
resources:
|
||||||
|
jobs:
|
||||||
|
job2:
|
||||||
|
name: job2
|
|
@@ -0,0 +1,11 @@
+resources:
+  jobs:
+    job1:
+      name: job1
+
+targets:
+  target1:
+    resources:
+      jobs:
+        job1:
+          description: job1

@@ -0,0 +1,7 @@
+resources:
+  jobs:
+    job1:
+      name: job1
+
+    job2:
+      name: job2

@@ -0,0 +1,8 @@
+targets:
+  target1:
+    resources:
+      jobs:
+        job1:
+          description: job1
+        job2:
+          description: job2

@@ -33,7 +33,7 @@ func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	}
 
 	if strings.HasPrefix(root, "~/") {
-		home := fmt.Sprintf("/Users/%s", currentUser.UserName)
+		home := fmt.Sprintf("/Workspace/Users/%s", currentUser.UserName)
 		b.Config.Workspace.RootPath = path.Join(home, root[2:])
 	}

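A minimal, runnable reproduction of the changed `~/` expansion above: home-relative workspace roots now land under `/Workspace/Users` rather than `/Users`. The user name is a placeholder.

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	root := "~/foo"
	userName := "jane@doe.com" // placeholder for currentUser.UserName

	if strings.HasPrefix(root, "~/") {
		home := fmt.Sprintf("/Workspace/Users/%s", userName)
		// Prints /Workspace/Users/jane@doe.com/foo
		fmt.Println(path.Join(home, root[2:]))
	}
}
```
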
@@ -27,7 +27,7 @@ func TestExpandWorkspaceRoot(t *testing.T) {
 	}
 	diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
 	require.NoError(t, diags.Error())
-	assert.Equal(t, "/Users/jane@doe.com/foo", b.Config.Workspace.RootPath)
+	assert.Equal(t, "/Workspace/Users/jane@doe.com/foo", b.Config.Workspace.RootPath)
 }
 
 func TestExpandWorkspaceRootDoesNothing(t *testing.T) {

@@ -0,0 +1,65 @@
+package mutator
+
+import (
+	"context"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+)
+
+type initializeURLs struct {
+}
+
+// InitializeURLs makes sure the URL field of each resource is configured.
+// NOTE: since this depends on an extra API call, this mutator adds some extra
+// latency. As such, it should only be used when needed.
+// This URL field is used for the output of the 'bundle summary' CLI command.
+func InitializeURLs() bundle.Mutator {
+	return &initializeURLs{}
+}
+
+func (m *initializeURLs) Name() string {
+	return "InitializeURLs"
+}
+
+func (m *initializeURLs) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	workspaceId, err := b.WorkspaceClient().CurrentWorkspaceID(ctx)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	orgId := strconv.FormatInt(workspaceId, 10)
+	host := b.WorkspaceClient().Config.CanonicalHostName()
+	initializeForWorkspace(b, orgId, host)
+	return nil
+}
+
+func initializeForWorkspace(b *bundle.Bundle, orgId string, host string) error {
+	baseURL, err := url.Parse(host)
+	if err != nil {
+		return err
+	}
+
+	// Add ?o=<workspace id> only if <workspace id> wasn't in the subdomain already.
+	// The ?o= is needed when vanity URLs / legacy workspace URLs are used.
+	// If it's not needed we prefer to leave it out since these URLs are rather
+	// long for most terminals.
+	//
+	// See https://docs.databricks.com/en/workspace/workspace-details.html for
+	// further reading about the '?o=' suffix.
+	if !strings.Contains(baseURL.Hostname(), orgId) {
+		values := baseURL.Query()
+		values.Add("o", orgId)
+		baseURL.RawQuery = values.Encode()
+	}
+
+	for _, group := range b.Config.Resources.AllResources() {
+		for _, r := range group.Resources {
+			r.InitializeURL(*baseURL)
+		}
+	}
+
+	return nil
+}

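The `?o=` handling is the subtle part of this file. A standalone sketch of just that logic, using the same standard-library calls as above: the parameter is appended only when the workspace ID is not already encoded in the hostname (as it is for hosts like adb-123456.azuredatabricks.net).

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	orgId := "123456"
	for _, host := range []string{
		"https://mycompany.databricks.com/",      // vanity URL: needs ?o=
		"https://adb-123456.azuredatabricks.net/", // ID in hostname: no ?o=
	} {
		baseURL, err := url.Parse(host)
		if err != nil {
			panic(err)
		}
		if !strings.Contains(baseURL.Hostname(), orgId) {
			values := baseURL.Query()
			values.Add("o", orgId)
			baseURL.RawQuery = values.Encode()
		}
		fmt.Println(baseURL.String())
	}
}
```
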
@@ -0,0 +1,130 @@
+package mutator
+
+import (
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/databricks-sdk-go/service/catalog"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+	"github.com/databricks/databricks-sdk-go/service/ml"
+	"github.com/databricks/databricks-sdk-go/service/pipelines"
+	"github.com/databricks/databricks-sdk-go/service/serving"
+	"github.com/stretchr/testify/require"
+)
+
+func TestInitializeURLs(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Workspace: config.Workspace{
+				Host: "https://mycompany.databricks.com/",
+			},
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job1": {
+						ID:          "1",
+						JobSettings: &jobs.JobSettings{Name: "job1"},
+					},
+				},
+				Pipelines: map[string]*resources.Pipeline{
+					"pipeline1": {
+						ID:           "3",
+						PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1"},
+					},
+				},
+				Experiments: map[string]*resources.MlflowExperiment{
+					"experiment1": {
+						ID:         "4",
+						Experiment: &ml.Experiment{Name: "experiment1"},
+					},
+				},
+				Models: map[string]*resources.MlflowModel{
+					"model1": {
+						ID:    "a model uses its name for identifier",
+						Model: &ml.Model{Name: "a model uses its name for identifier"},
+					},
+				},
+				ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{
+					"servingendpoint1": {
+						ID: "my_serving_endpoint",
+						CreateServingEndpoint: &serving.CreateServingEndpoint{
+							Name: "my_serving_endpoint",
+						},
+					},
+				},
+				RegisteredModels: map[string]*resources.RegisteredModel{
+					"registeredmodel1": {
+						ID: "8",
+						CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{
+							Name: "my_registered_model",
+						},
+					},
+				},
+				QualityMonitors: map[string]*resources.QualityMonitor{
+					"qualityMonitor1": {
+						CreateMonitor: &catalog.CreateMonitor{
+							TableName: "catalog.schema.qualityMonitor1",
+						},
+					},
+				},
+				Schemas: map[string]*resources.Schema{
+					"schema1": {
+						ID: "catalog.schema",
+						CreateSchema: &catalog.CreateSchema{
+							Name: "schema",
+						},
+					},
+				},
+				Clusters: map[string]*resources.Cluster{
+					"cluster1": {
+						ID: "1017-103929-vlr7jzcf",
+						ClusterSpec: &compute.ClusterSpec{
+							ClusterName: "cluster1",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	expectedURLs := map[string]string{
+		"job1":             "https://mycompany.databricks.com/jobs/1?o=123456",
+		"pipeline1":        "https://mycompany.databricks.com/pipelines/3?o=123456",
+		"experiment1":      "https://mycompany.databricks.com/ml/experiments/4?o=123456",
+		"model1":           "https://mycompany.databricks.com/ml/models/a%20model%20uses%20its%20name%20for%20identifier?o=123456",
+		"servingendpoint1": "https://mycompany.databricks.com/ml/endpoints/my_serving_endpoint?o=123456",
+		"registeredmodel1": "https://mycompany.databricks.com/explore/data/models/8?o=123456",
+		"qualityMonitor1":  "https://mycompany.databricks.com/explore/data/catalog/schema/qualityMonitor1?o=123456",
+		"schema1":          "https://mycompany.databricks.com/explore/data/catalog/schema?o=123456",
+		"cluster1":         "https://mycompany.databricks.com/compute/clusters/1017-103929-vlr7jzcf?o=123456",
+	}
+
+	initializeForWorkspace(b, "123456", "https://mycompany.databricks.com/")
+
+	for _, group := range b.Config.Resources.AllResources() {
+		for key, r := range group.Resources {
+			require.Equal(t, expectedURLs[key], r.GetURL(), "Unexpected URL for "+key)
+		}
+	}
+}
+
+func TestInitializeURLsWithoutOrgId(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job1": {
+						ID:          "1",
+						JobSettings: &jobs.JobSettings{Name: "job1"},
+					},
+				},
+			},
+		},
+	}
+
+	initializeForWorkspace(b, "123456", "https://adb-123456.azuredatabricks.net/")
+
+	require.Equal(t, "https://adb-123456.azuredatabricks.net/jobs/1", b.Config.Resources.Jobs["job1"].URL)
+}

@@ -5,8 +5,8 @@ import (
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/libs/auth"
 	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/iamutil"
 	"github.com/databricks/cli/libs/tags"
 )
 
@@ -33,7 +33,7 @@ func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	}
 
 	b.Config.Workspace.CurrentUser = &config.User{
-		ShortName: auth.GetShortUserName(me),
+		ShortName: iamutil.GetShortUserName(me),
 		User:      me,
 	}

@@ -0,0 +1,67 @@
+package mutator
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+)
+
+type prependWorkspacePrefix struct{}
+
+// PrependWorkspacePrefix prepends the workspace root path to all paths in the bundle.
+func PrependWorkspacePrefix() bundle.Mutator {
+	return &prependWorkspacePrefix{}
+}
+
+func (m *prependWorkspacePrefix) Name() string {
+	return "PrependWorkspacePrefix"
+}
+
+var skipPrefixes = []string{
+	"/Workspace/",
+	"/Volumes/",
+}
+
+func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	patterns := []dyn.Pattern{
+		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("root_path")),
+		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("file_path")),
+		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("artifact_path")),
+		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("state_path")),
+	}
+
+	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+		var err error
+		for _, pattern := range patterns {
+			v, err = dyn.MapByPattern(v, pattern, func(p dyn.Path, pv dyn.Value) (dyn.Value, error) {
+				path, ok := pv.AsString()
+				if !ok {
+					return dyn.InvalidValue, fmt.Errorf("expected string, got %s", v.Kind())
+				}
+
+				for _, prefix := range skipPrefixes {
+					if strings.HasPrefix(path, prefix) {
+						return pv, nil
+					}
+				}
+
+				return dyn.NewValue(fmt.Sprintf("/Workspace%s", path), v.Locations()), nil
+			})
+			if err != nil {
+				return dyn.InvalidValue, err
+			}
+		}
+		return v, nil
+	})
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	return nil
+}

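A minimal model of the prepending rule implemented above: values already under one of the skip prefixes are returned unchanged, everything else gains the `/Workspace` prefix. The sample paths mirror the test cases that follow.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	skipPrefixes := []string{"/Workspace/", "/Volumes/"}
	for _, p := range []string{"/Users/test", "/Workspace/Users/test", "/Volumes/Users/test"} {
		out := fmt.Sprintf("/Workspace%s", p) // default: prepend
		for _, prefix := range skipPrefixes {
			if strings.HasPrefix(p, prefix) {
				out = p // already rooted correctly: leave as-is
			}
		}
		fmt.Println(out)
	}
}
```
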
@@ -0,0 +1,79 @@
+package mutator
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/databricks-sdk-go/service/iam"
+	"github.com/stretchr/testify/require"
+)
+
+func TestPrependWorkspacePrefix(t *testing.T) {
+	testCases := []struct {
+		path     string
+		expected string
+	}{
+		{
+			path:     "/Users/test",
+			expected: "/Workspace/Users/test",
+		},
+		{
+			path:     "/Shared/test",
+			expected: "/Workspace/Shared/test",
+		},
+		{
+			path:     "/Workspace/Users/test",
+			expected: "/Workspace/Users/test",
+		},
+		{
+			path:     "/Volumes/Users/test",
+			expected: "/Volumes/Users/test",
+		},
+	}
+
+	for _, tc := range testCases {
+		b := &bundle.Bundle{
+			Config: config.Root{
+				Workspace: config.Workspace{
+					RootPath:     tc.path,
+					ArtifactPath: tc.path,
+					FilePath:     tc.path,
+					StatePath:    tc.path,
+				},
+			},
+		}
+
+		diags := bundle.Apply(context.Background(), b, PrependWorkspacePrefix())
+		require.Empty(t, diags)
+		require.Equal(t, tc.expected, b.Config.Workspace.RootPath)
+		require.Equal(t, tc.expected, b.Config.Workspace.ArtifactPath)
+		require.Equal(t, tc.expected, b.Config.Workspace.FilePath)
+		require.Equal(t, tc.expected, b.Config.Workspace.StatePath)
+	}
+}
+
+func TestPrependWorkspaceForDefaultConfig(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Bundle: config.Bundle{
+				Name:   "test",
+				Target: "dev",
+			},
+			Workspace: config.Workspace{
+				CurrentUser: &config.User{
+					User: &iam.User{
+						UserName: "jane@doe.com",
+					},
+				},
+			},
+		},
+	}
+	diags := bundle.Apply(context.Background(), b, bundle.Seq(DefineDefaultWorkspaceRoot(), ExpandWorkspaceRoot(), DefineDefaultWorkspacePaths(), PrependWorkspacePrefix()))
+	require.Empty(t, diags)
+	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev", b.Config.Workspace.RootPath)
+	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/artifacts", b.Config.Workspace.ArtifactPath)
+	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/files", b.Config.Workspace.FilePath)
+	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/state", b.Config.Workspace.StatePath)
+}

@@ -6,9 +6,9 @@ import (
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
-	"github.com/databricks/cli/libs/auth"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/cli/libs/iamutil"
 	"github.com/databricks/cli/libs/log"
 )
 
@@ -174,7 +174,7 @@ func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 		transformDevelopmentMode(ctx, b)
 		return diags
 	case config.Production:
-		isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.UserName)
+		isPrincipal := iamutil.IsServicePrincipal(b.Config.Workspace.CurrentUser.User)
 		return validateProductionMode(ctx, b, isPrincipal)
 	case "":
 		// No action

@@ -0,0 +1,72 @@
+package mutator
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+)
+
+type rewriteWorkspacePrefix struct{}
+
+// RewriteWorkspacePrefix finds any strings in the bundle configuration that
+// use the workspace prefix plus a workspace path variable and removes the
+// workspace prefix from them.
+func RewriteWorkspacePrefix() bundle.Mutator {
+	return &rewriteWorkspacePrefix{}
+}
+
+func (m *rewriteWorkspacePrefix) Name() string {
+	return "RewriteWorkspacePrefix"
+}
+
+func (m *rewriteWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	diags := diag.Diagnostics{}
+	paths := map[string]string{
+		"/Workspace/${workspace.root_path}":     "${workspace.root_path}",
+		"/Workspace${workspace.root_path}":      "${workspace.root_path}",
+		"/Workspace/${workspace.file_path}":     "${workspace.file_path}",
+		"/Workspace${workspace.file_path}":      "${workspace.file_path}",
+		"/Workspace/${workspace.artifact_path}": "${workspace.artifact_path}",
+		"/Workspace${workspace.artifact_path}":  "${workspace.artifact_path}",
+		"/Workspace/${workspace.state_path}":    "${workspace.state_path}",
+		"/Workspace${workspace.state_path}":     "${workspace.state_path}",
+	}
+
+	err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) {
+		// Walk through the bundle configuration, check all the string leafs and
+		// see if any of the prefixes are used in the remote path.
+		return dyn.Walk(root, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+			vv, ok := v.AsString()
+			if !ok {
+				return v, nil
+			}
+
+			for path, replacePath := range paths {
+				if strings.Contains(vv, path) {
+					newPath := strings.Replace(vv, path, replacePath, 1)
+					diags = append(diags, diag.Diagnostic{
+						Severity:  diag.Warning,
+						Summary:   fmt.Sprintf("substring %q found in %q. Please update this to %q.", path, vv, newPath),
+						Detail:    "For more information, please refer to: https://docs.databricks.com/en/release-notes/dev-tools/bundles.html#workspace-paths",
+						Locations: v.Locations(),
+						Paths:     []dyn.Path{p},
+					})
+
+					// Remove the workspace prefix from the string.
+					return dyn.NewValue(newPath, v.Locations()), nil
+				}
+			}
+
+			return v, nil
+		})
+	})
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	return diags
+}

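A standalone sketch of the replacement table above applied to a single string value; in the real mutator a matching value additionally produces the warning diagnostic shown. Only the `file_path` entries are included here for brevity.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Both variants are needed: users wrote the prefix with and without a
	// trailing slash before the variable.
	paths := map[string]string{
		"/Workspace/${workspace.file_path}": "${workspace.file_path}",
		"/Workspace${workspace.file_path}":  "${workspace.file_path}",
	}

	vv := "/Workspace${workspace.file_path}/notebook1"
	for path, replacePath := range paths {
		if strings.Contains(vv, path) {
			vv = strings.Replace(vv, path, replacePath, 1)
			break
		}
	}
	fmt.Println(vv) // ${workspace.file_path}/notebook1
}
```
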
@@ -0,0 +1,85 @@
+package mutator
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNoWorkspacePrefixUsed(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Workspace: config.Workspace{
+				RootPath:     "/Workspace/Users/test",
+				ArtifactPath: "/Workspace/Users/test/artifacts",
+				FilePath:     "/Workspace/Users/test/files",
+				StatePath:    "/Workspace/Users/test/state",
+			},
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"test_job": {
+						JobSettings: &jobs.JobSettings{
+							Tasks: []jobs.Task{
+								{
+									SparkPythonTask: &jobs.SparkPythonTask{
+										PythonFile: "/Workspace/${workspace.root_path}/file1.py",
+									},
+								},
+								{
+									NotebookTask: &jobs.NotebookTask{
+										NotebookPath: "/Workspace${workspace.file_path}/notebook1",
+									},
+									Libraries: []compute.Library{
+										{
+											Jar: "/Workspace/${workspace.artifact_path}/jar1.jar",
+										},
+									},
+								},
+								{
+									NotebookTask: &jobs.NotebookTask{
+										NotebookPath: "${workspace.file_path}/notebook2",
+									},
+									Libraries: []compute.Library{
+										{
+											Jar: "${workspace.artifact_path}/jar2.jar",
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	diags := bundle.Apply(context.Background(), b, RewriteWorkspacePrefix())
+	require.Len(t, diags, 3)
+
+	expectedErrors := map[string]bool{
+		`substring "/Workspace/${workspace.root_path}" found in "/Workspace/${workspace.root_path}/file1.py". Please update this to "${workspace.root_path}/file1.py".`:            true,
+		`substring "/Workspace${workspace.file_path}" found in "/Workspace${workspace.file_path}/notebook1". Please update this to "${workspace.file_path}/notebook1".`:            true,
+		`substring "/Workspace/${workspace.artifact_path}" found in "/Workspace/${workspace.artifact_path}/jar1.jar". Please update this to "${workspace.artifact_path}/jar1.jar".`: true,
+	}
+
+	for _, d := range diags {
+		require.Equal(t, d.Severity, diag.Warning)
+		require.Contains(t, expectedErrors, d.Summary)
+		delete(expectedErrors, d.Summary)
+	}
+
+	require.Equal(t, "${workspace.root_path}/file1.py", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[0].SparkPythonTask.PythonFile)
+	require.Equal(t, "${workspace.file_path}/notebook1", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[1].NotebookTask.NotebookPath)
+	require.Equal(t, "${workspace.artifact_path}/jar1.jar", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[1].Libraries[0].Jar)
+	require.Equal(t, "${workspace.file_path}/notebook2", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[2].NotebookTask.NotebookPath)
+	require.Equal(t, "${workspace.artifact_path}/jar2.jar", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[2].Libraries[0].Jar)
+}

@@ -30,50 +30,44 @@ func (m *setRunAs) Name() string {
 	return "SetRunAs"
 }
 
-type errUnsupportedResourceTypeForRunAs struct {
-	resourceType     string
-	resourceLocation dyn.Location
-	currentUser      string
-	runAsUser        string
-}
-
-func (e errUnsupportedResourceTypeForRunAs) Error() string {
-	return fmt.Sprintf("%s are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Please refer to the documentation at https://docs.databricks.com/dev-tools/bundles/run-as.html for more details. Location of the unsupported resource: %s. Current identity: %s. Run as identity: %s", e.resourceType, e.resourceLocation, e.currentUser, e.runAsUser)
-}
-
-type errBothSpAndUserSpecified struct {
-	spName   string
-	spLoc    dyn.Location
-	userName string
-	userLoc  dyn.Location
-}
-
-func (e errBothSpAndUserSpecified) Error() string {
-	return fmt.Sprintf("run_as section must specify exactly one identity. A service_principal_name %q is specified at %s. A user_name %q is defined at %s", e.spName, e.spLoc, e.userName, e.userLoc)
-}
-
-func validateRunAs(b *bundle.Bundle) error {
-	neitherSpecifiedErr := fmt.Errorf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s", b.Config.GetLocation("run_as"))
-	// Error if neither service_principal_name nor user_name are specified, but the
+func reportRunAsNotSupported(resourceType string, location dyn.Location, currentUser string, runAsUser string) diag.Diagnostics {
+	return diag.Diagnostics{{
+		Summary: fmt.Sprintf("%s do not support setting a run_as user that is different from the owner.\n"+
+			"Current identity: %s. Run as identity: %s.\n"+
+			"See https://docs.databricks.com/dev-tools/bundles/run-as.html to learn more about the run_as property.", resourceType, currentUser, runAsUser),
+		Locations: []dyn.Location{location},
+		Severity:  diag.Error,
+	}}
+}
+
+func validateRunAs(b *bundle.Bundle) diag.Diagnostics {
+	diags := diag.Diagnostics{}
+
+	neitherSpecifiedErr := diag.Diagnostics{{
+		Summary:   "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified",
+		Locations: []dyn.Location{b.Config.GetLocation("run_as")},
+		Severity:  diag.Error,
+	}}
+
+	// Fail fast if neither service_principal_name nor user_name are specified, but the
 	// run_as section is present.
 	if b.Config.Value().Get("run_as").Kind() == dyn.KindNil {
 		return neitherSpecifiedErr
 	}
-	// Error if one or both of service_principal_name and user_name are specified,
+
+	// Fail fast if one or both of service_principal_name and user_name are specified,
 	// but with empty values.
-	if b.Config.RunAs.ServicePrincipalName == "" && b.Config.RunAs.UserName == "" {
+	runAs := b.Config.RunAs
+	if runAs.ServicePrincipalName == "" && runAs.UserName == "" {
 		return neitherSpecifiedErr
 	}
 
-	// Error if both service_principal_name and user_name are specified
-	runAs := b.Config.RunAs
 	if runAs.UserName != "" && runAs.ServicePrincipalName != "" {
-		return errBothSpAndUserSpecified{
-			spName:   runAs.ServicePrincipalName,
-			userName: runAs.UserName,
-			spLoc:    b.Config.GetLocation("run_as.service_principal_name"),
-			userLoc:  b.Config.GetLocation("run_as.user_name"),
-		}
+		diags = diags.Extend(diag.Diagnostics{{
+			Summary:   "run_as section cannot specify both user_name and service_principal_name",
+			Locations: []dyn.Location{b.Config.GetLocation("run_as")},
+			Severity:  diag.Error,
+		}})
 	}
 
 	identity := runAs.ServicePrincipalName
@@ -83,40 +77,40 @@ func validateRunAs(b *bundle.Bundle) error {
 
 	// All resources are supported if the run_as identity is the same as the current deployment identity.
 	if identity == b.Config.Workspace.CurrentUser.UserName {
-		return nil
+		return diags
 	}
 
 	// DLT pipelines do not support run_as in the API.
 	if len(b.Config.Resources.Pipelines) > 0 {
-		return errUnsupportedResourceTypeForRunAs{
-			resourceType:     "pipelines",
-			resourceLocation: b.Config.GetLocation("resources.pipelines"),
-			currentUser:      b.Config.Workspace.CurrentUser.UserName,
-			runAsUser:        identity,
-		}
+		diags = diags.Extend(reportRunAsNotSupported(
+			"pipelines",
+			b.Config.GetLocation("resources.pipelines"),
+			b.Config.Workspace.CurrentUser.UserName,
+			identity,
+		))
 	}
 
 	// Model serving endpoints do not support run_as in the API.
 	if len(b.Config.Resources.ModelServingEndpoints) > 0 {
-		return errUnsupportedResourceTypeForRunAs{
-			resourceType:     "model_serving_endpoints",
-			resourceLocation: b.Config.GetLocation("resources.model_serving_endpoints"),
-			currentUser:      b.Config.Workspace.CurrentUser.UserName,
-			runAsUser:        identity,
-		}
+		diags = diags.Extend(reportRunAsNotSupported(
+			"model_serving_endpoints",
+			b.Config.GetLocation("resources.model_serving_endpoints"),
+			b.Config.Workspace.CurrentUser.UserName,
+			identity,
+		))
 	}
 
 	// Monitors do not support run_as in the API.
 	if len(b.Config.Resources.QualityMonitors) > 0 {
-		return errUnsupportedResourceTypeForRunAs{
-			resourceType:     "quality_monitors",
-			resourceLocation: b.Config.GetLocation("resources.quality_monitors"),
-			currentUser:      b.Config.Workspace.CurrentUser.UserName,
-			runAsUser:        identity,
-		}
+		diags = diags.Extend(reportRunAsNotSupported(
+			"quality_monitors",
+			b.Config.GetLocation("resources.quality_monitors"),
+			b.Config.Workspace.CurrentUser.UserName,
+			identity,
+		))
 	}
 
-	return nil
+	return diags
 }
 
 func setRunAsForJobs(b *bundle.Bundle) {
@@ -187,8 +181,9 @@ func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
 	}
 
 	// Assert the run_as configuration is valid in the context of the bundle
-	if err := validateRunAs(b); err != nil {
-		return diag.FromErr(err)
+	diags := validateRunAs(b)
+	if diags.HasError() {
+		return diags
 	}
 
 	setRunAsForJobs(b)

@@ -188,11 +188,8 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) {
 			Config: *r,
 		}
 		diags := bundle.Apply(context.Background(), b, SetRunAs())
-		assert.Equal(t, diags.Error().Error(), errUnsupportedResourceTypeForRunAs{
-			resourceType:     rt,
-			resourceLocation: dyn.Location{},
-			currentUser:      "alice",
-			runAsUser:        "bob",
-		}.Error(), "expected run_as with a different identity than the current deployment user to not supported for resources of type: %s", rt)
+		assert.Contains(t, diags.Error().Error(), "do not support setting a run_as user that is different from the owner.\n"+
+			"Current identity: alice. Run as identity: bob.\n"+
+			"See https://docs.databricks.com/dev-tools/bundles/run-as.html to learn more about the run_as property.", rt)
 	}
 }

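The hunks above migrate run_as validation from Go errors to the CLI's diag.Diagnostics so that every problem is collected and reported with a location and severity, instead of failing on the first error. A minimal sketch of that pattern follows, using simplified stand-ins for the libs/diag types (the real Diagnostics type also carries Locations and other fields; the names here mirror the diff but the types are assumptions, not the actual library):

package main

import "fmt"

// Simplified stand-ins for libs/diag (assumption: the real type carries
// Severity and Summary, as the diff shows).
type Severity int

const (
	Error Severity = iota
	Warning
)

type Diagnostic struct {
	Severity Severity
	Summary  string
}

type Diagnostics []Diagnostic

// HasError reports whether any collected diagnostic is an error.
func (ds Diagnostics) HasError() bool {
	for _, d := range ds {
		if d.Severity == Error {
			return true
		}
	}
	return false
}

// Extend appends another set of diagnostics and returns the result.
func (ds Diagnostics) Extend(more Diagnostics) Diagnostics {
	return append(ds, more...)
}

// validate mirrors the new validateRunAs shape: collect every problem
// instead of returning on the first one.
func validate(spName, userName string) Diagnostics {
	diags := Diagnostics{}
	if spName == "" && userName == "" {
		diags = diags.Extend(Diagnostics{{Error, "run_as section must specify exactly one identity"}})
	}
	if spName != "" && userName != "" {
		diags = diags.Extend(Diagnostics{{Error, "run_as section cannot specify both user_name and service_principal_name"}})
	}
	return diags
}

func main() {
	diags := validate("sp-123", "alice")
	if diags.HasError() {
		for _, d := range diags {
			fmt.Println(d.Summary)
		}
	}
}

The payoff is visible in the test above: a single Apply can surface several actionable messages at once rather than one error per run.
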
@@ -3,6 +3,7 @@ package config
 import (
 	"context"
 	"fmt"
+	"net/url"
 
 	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/databricks-sdk-go"
@@ -31,6 +32,53 @@ type ConfigResource interface {
 	// Terraform equivalent name of the resource. For example "databricks_job"
 	// for jobs and "databricks_pipeline" for pipelines.
 	TerraformResourceName() string
+
+	// GetName returns the in-product name of the resource.
+	GetName() string
+
+	// GetURL returns the URL of the resource.
+	GetURL() string
+
+	// InitializeURL initializes the URL field of the resource.
+	InitializeURL(baseURL url.URL)
+}
+
+// ResourceGroup represents a group of resources of the same type.
+// It includes a description of the resource type and a map of resources.
+type ResourceGroup struct {
+	Description ResourceDescription
+	Resources   map[string]ConfigResource
+}
+
+// collectResourceMap collects resources of a specific type into a ResourceGroup.
+func collectResourceMap[T ConfigResource](
+	description ResourceDescription,
+	input map[string]T,
+) ResourceGroup {
+	resources := make(map[string]ConfigResource)
+	for key, resource := range input {
+		resources[key] = resource
+	}
+	return ResourceGroup{
+		Description: description,
+		Resources:   resources,
+	}
+}
+
+// AllResources returns all resources in the bundle grouped by their resource type.
+func (r *Resources) AllResources() []ResourceGroup {
+	descriptions := SupportedResources()
+	return []ResourceGroup{
+		collectResourceMap(descriptions["jobs"], r.Jobs),
+		collectResourceMap(descriptions["pipelines"], r.Pipelines),
+		collectResourceMap(descriptions["models"], r.Models),
+		collectResourceMap(descriptions["experiments"], r.Experiments),
+		collectResourceMap(descriptions["model_serving_endpoints"], r.ModelServingEndpoints),
+		collectResourceMap(descriptions["registered_models"], r.RegisteredModels),
+		collectResourceMap(descriptions["quality_monitors"], r.QualityMonitors),
+		collectResourceMap(descriptions["schemas"], r.Schemas),
+		collectResourceMap(descriptions["clusters"], r.Clusters),
+	}
 }
 
 func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error) {
@@ -60,3 +108,73 @@ func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error)
 
 	return found[0], nil
 }
+
+type ResourceDescription struct {
+	// Singular and plural name when used to refer to the configuration.
+	SingularName string
+	PluralName   string
+
+	// Singular and plural title when used in summaries / terminal UI.
+	SingularTitle string
+	PluralTitle   string
+}
+
+// The keys of the map correspond to the resource key in the bundle configuration.
+func SupportedResources() map[string]ResourceDescription {
+	return map[string]ResourceDescription{
+		"jobs": {
+			SingularName:  "job",
+			PluralName:    "jobs",
+			SingularTitle: "Job",
+			PluralTitle:   "Jobs",
+		},
+		"pipelines": {
+			SingularName:  "pipeline",
+			PluralName:    "pipelines",
+			SingularTitle: "Pipeline",
+			PluralTitle:   "Pipelines",
+		},
+		"models": {
+			SingularName:  "model",
+			PluralName:    "models",
+			SingularTitle: "Model",
+			PluralTitle:   "Models",
+		},
+		"experiments": {
+			SingularName:  "experiment",
+			PluralName:    "experiments",
+			SingularTitle: "Experiment",
+			PluralTitle:   "Experiments",
+		},
+		"model_serving_endpoints": {
+			SingularName:  "model_serving_endpoint",
+			PluralName:    "model_serving_endpoints",
+			SingularTitle: "Model Serving Endpoint",
+			PluralTitle:   "Model Serving Endpoints",
+		},
+		"registered_models": {
+			SingularName:  "registered_model",
+			PluralName:    "registered_models",
+			SingularTitle: "Registered Model",
+			PluralTitle:   "Registered Models",
+		},
+		"quality_monitors": {
+			SingularName:  "quality_monitor",
+			PluralName:    "quality_monitors",
+			SingularTitle: "Quality Monitor",
+			PluralTitle:   "Quality Monitors",
+		},
+		"schemas": {
+			SingularName:  "schema",
+			PluralName:    "schemas",
+			SingularTitle: "Schema",
+			PluralTitle:   "Schemas",
+		},
+		"clusters": {
+			SingularName:  "cluster",
+			PluralName:    "clusters",
+			SingularTitle: "Cluster",
+			PluralTitle:   "Clusters",
+		},
+	}
+}

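Together, ConfigResource, ResourceGroup, and AllResources let downstream code (for example a deployment summary) walk every resource in a bundle uniformly. A minimal sketch of a consumer, with simplified stand-ins for the types above (the concrete `job` type and the data here are hypothetical, not taken from the CLI):

package main

import "fmt"

// Simplified stand-ins mirroring the types added in the diff.
type ResourceDescription struct {
	SingularName, PluralName   string
	SingularTitle, PluralTitle string
}

type ConfigResource interface {
	GetName() string
	GetURL() string
}

// Hypothetical concrete resource type for illustration.
type job struct{ name string }

func (j job) GetName() string { return j.name }
func (j job) GetURL() string  { return "" }

type ResourceGroup struct {
	Description ResourceDescription
	Resources   map[string]ConfigResource
}

func main() {
	groups := []ResourceGroup{
		{
			Description: ResourceDescription{PluralTitle: "Jobs"},
			Resources:   map[string]ConfigResource{"my_job": job{name: "My Job"}},
		},
	}
	// A renderer only needs the group abstraction, not the concrete types.
	for _, g := range groups {
		fmt.Printf("%s:\n", g.Description.PluralTitle)
		for key, r := range g.Resources {
			fmt.Printf("  %s (%s)\n", key, r.GetName())
		}
	}
}
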
@@ -2,6 +2,8 @@ package resources
 
 import (
 	"context"
+	"fmt"
+	"net/url"
 
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/databricks-sdk-go"
@@ -13,6 +15,7 @@ type Cluster struct {
 	ID             string         `json:"id,omitempty" bundle:"readonly"`
 	Permissions    []Permission   `json:"permissions,omitempty"`
 	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
+	URL            string         `json:"url,omitempty" bundle:"internal"`
 
 	*compute.ClusterSpec
 }
@@ -37,3 +40,19 @@ func (s *Cluster) Exists(ctx context.Context, w *databricks.WorkspaceClient, id
 func (s *Cluster) TerraformResourceName() string {
 	return "databricks_cluster"
 }
+
+func (s *Cluster) InitializeURL(baseURL url.URL) {
+	if s.ID == "" {
+		return
+	}
+	baseURL.Path = fmt.Sprintf("compute/clusters/%s", s.ID)
+	s.URL = baseURL.String()
+}
+
+func (s *Cluster) GetName() string {
+	return s.ClusterName
+}
+
+func (s *Cluster) GetURL() string {
+	return s.URL
+}

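Note that InitializeURL takes url.URL by value, so each resource can overwrite Path without mutating the caller's base URL. A runnable sketch of that semantics (the workspace host below is a hypothetical example):

package main

import (
	"fmt"
	"net/url"
)

// Sketch of the InitializeURL pattern from the diff. Passing url.URL by value
// means each resource sets its own Path on a private copy of the base URL.
type cluster struct {
	ID  string
	URL string
}

func (c *cluster) InitializeURL(baseURL url.URL) {
	if c.ID == "" {
		return
	}
	baseURL.Path = fmt.Sprintf("compute/clusters/%s", c.ID)
	c.URL = baseURL.String()
}

func main() {
	base, _ := url.Parse("https://my-workspace.cloud.databricks.com") // hypothetical host
	c := cluster{ID: "1234-567890-abcde123"}
	c.InitializeURL(*base)
	fmt.Println(c.URL) // https://my-workspace.cloud.databricks.com/compute/clusters/1234-567890-abcde123
	fmt.Println(base)  // the shared base URL is unchanged
}

The same shape repeats for jobs, experiments, models, serving endpoints, pipelines, monitors, registered models, and schemas below; only the path template and the name field differ.
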
@@ -2,6 +2,8 @@ package resources
 
 import (
 	"context"
+	"fmt"
+	"net/url"
 	"strconv"
 
 	"github.com/databricks/cli/libs/log"
@@ -14,6 +16,7 @@ type Job struct {
 	ID             string         `json:"id,omitempty" bundle:"readonly"`
 	Permissions    []Permission   `json:"permissions,omitempty"`
 	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
+	URL            string         `json:"url,omitempty" bundle:"internal"`
 
 	*jobs.JobSettings
 }
@@ -44,3 +47,19 @@ func (j *Job) Exists(ctx context.Context, w *databricks.WorkspaceClient, id stri
 func (j *Job) TerraformResourceName() string {
 	return "databricks_job"
 }
+
+func (j *Job) InitializeURL(baseURL url.URL) {
+	if j.ID == "" {
+		return
+	}
+	baseURL.Path = fmt.Sprintf("jobs/%s", j.ID)
+	j.URL = baseURL.String()
+}
+
+func (j *Job) GetName() string {
+	return j.Name
+}
+
+func (j *Job) GetURL() string {
+	return j.URL
+}

@@ -2,6 +2,8 @@ package resources
 
 import (
	"context"
+	"fmt"
+	"net/url"
 
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/databricks-sdk-go"
@@ -13,6 +15,7 @@ type MlflowExperiment struct {
 	ID             string         `json:"id,omitempty" bundle:"readonly"`
 	Permissions    []Permission   `json:"permissions,omitempty"`
 	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
+	URL            string         `json:"url,omitempty" bundle:"internal"`
 
 	*ml.Experiment
 }
@@ -39,3 +42,19 @@ func (s *MlflowExperiment) Exists(ctx context.Context, w *databricks.WorkspaceCl
 func (s *MlflowExperiment) TerraformResourceName() string {
 	return "databricks_mlflow_experiment"
 }
+
+func (s *MlflowExperiment) InitializeURL(baseURL url.URL) {
+	if s.ID == "" {
+		return
+	}
+	baseURL.Path = fmt.Sprintf("ml/experiments/%s", s.ID)
+	s.URL = baseURL.String()
+}
+
+func (s *MlflowExperiment) GetName() string {
+	return s.Name
+}
+
+func (s *MlflowExperiment) GetURL() string {
+	return s.URL
+}

@@ -2,6 +2,8 @@ package resources
 
 import (
 	"context"
+	"fmt"
+	"net/url"
 
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/databricks-sdk-go"
@@ -13,6 +15,7 @@ type MlflowModel struct {
 	ID             string         `json:"id,omitempty" bundle:"readonly"`
 	Permissions    []Permission   `json:"permissions,omitempty"`
 	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
+	URL            string         `json:"url,omitempty" bundle:"internal"`
 
 	*ml.Model
 }
@@ -39,3 +42,19 @@ func (s *MlflowModel) Exists(ctx context.Context, w *databricks.WorkspaceClient,
 func (s *MlflowModel) TerraformResourceName() string {
 	return "databricks_mlflow_model"
 }
+
+func (s *MlflowModel) InitializeURL(baseURL url.URL) {
+	if s.ID == "" {
+		return
+	}
+	baseURL.Path = fmt.Sprintf("ml/models/%s", s.ID)
+	s.URL = baseURL.String()
+}
+
+func (s *MlflowModel) GetName() string {
+	return s.Name
+}
+
+func (s *MlflowModel) GetURL() string {
+	return s.URL
+}

@@ -2,6 +2,8 @@ package resources
 
 import (
 	"context"
+	"fmt"
+	"net/url"
 
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/databricks-sdk-go"
@@ -23,6 +25,7 @@ type ModelServingEndpoint struct {
 	Permissions []Permission `json:"permissions,omitempty"`
 
 	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
+	URL            string         `json:"url,omitempty" bundle:"internal"`
 }
 
 func (s *ModelServingEndpoint) UnmarshalJSON(b []byte) error {
@@ -47,3 +50,19 @@ func (s *ModelServingEndpoint) Exists(ctx context.Context, w *databricks.Workspa
 func (s *ModelServingEndpoint) TerraformResourceName() string {
 	return "databricks_model_serving"
 }
+
+func (s *ModelServingEndpoint) InitializeURL(baseURL url.URL) {
+	if s.ID == "" {
+		return
+	}
+	baseURL.Path = fmt.Sprintf("ml/endpoints/%s", s.ID)
+	s.URL = baseURL.String()
+}
+
+func (s *ModelServingEndpoint) GetName() string {
+	return s.Name
+}
+
+func (s *ModelServingEndpoint) GetURL() string {
+	return s.URL
+}

@@ -2,6 +2,8 @@ package resources
 
 import (
 	"context"
+	"fmt"
+	"net/url"
 
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/databricks-sdk-go"
@@ -13,6 +15,7 @@ type Pipeline struct {
 	ID             string         `json:"id,omitempty" bundle:"readonly"`
 	Permissions    []Permission   `json:"permissions,omitempty"`
 	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
+	URL            string         `json:"url,omitempty" bundle:"internal"`
 
 	*pipelines.PipelineSpec
 }
@@ -39,3 +42,19 @@ func (p *Pipeline) Exists(ctx context.Context, w *databricks.WorkspaceClient, id
 func (p *Pipeline) TerraformResourceName() string {
 	return "databricks_pipeline"
 }
+
+func (p *Pipeline) InitializeURL(baseURL url.URL) {
+	if p.ID == "" {
+		return
+	}
+	baseURL.Path = fmt.Sprintf("pipelines/%s", p.ID)
+	p.URL = baseURL.String()
+}
+
+func (p *Pipeline) GetName() string {
+	return p.Name
+}
+
+func (p *Pipeline) GetURL() string {
+	return p.URL
+}

@@ -2,6 +2,9 @@ package resources
 
 import (
 	"context"
+	"fmt"
+	"net/url"
+	"strings"
 
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/databricks-sdk-go"
@@ -20,6 +23,7 @@ type QualityMonitor struct {
 	ID string `json:"id,omitempty" bundle:"readonly"`
 
 	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
+	URL            string         `json:"url,omitempty" bundle:"internal"`
 }
 
 func (s *QualityMonitor) UnmarshalJSON(b []byte) error {
@@ -44,3 +48,19 @@ func (s *QualityMonitor) Exists(ctx context.Context, w *databricks.WorkspaceClie
 func (s *QualityMonitor) TerraformResourceName() string {
 	return "databricks_quality_monitor"
 }
+
+func (s *QualityMonitor) InitializeURL(baseURL url.URL) {
+	if s.TableName == "" {
+		return
+	}
+	baseURL.Path = fmt.Sprintf("explore/data/%s", strings.ReplaceAll(s.TableName, ".", "/"))
+	s.URL = baseURL.String()
+}
+
+func (s *QualityMonitor) GetName() string {
+	return s.TableName
+}
+
+func (s *QualityMonitor) GetURL() string {
+	return s.URL
+}

@@ -2,6 +2,9 @@ package resources
 
 import (
 	"context"
+	"fmt"
+	"net/url"
+	"strings"
 
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/databricks-sdk-go"
@@ -24,6 +27,7 @@ type RegisteredModel struct {
 	*catalog.CreateRegisteredModelRequest
 
 	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
+	URL            string         `json:"url,omitempty" bundle:"internal"`
 }
 
 func (s *RegisteredModel) UnmarshalJSON(b []byte) error {
@@ -48,3 +52,19 @@ func (s *RegisteredModel) Exists(ctx context.Context, w *databricks.WorkspaceCli
 func (s *RegisteredModel) TerraformResourceName() string {
 	return "databricks_registered_model"
 }
+
+func (s *RegisteredModel) InitializeURL(baseURL url.URL) {
+	if s.ID == "" {
+		return
+	}
+	baseURL.Path = fmt.Sprintf("explore/data/models/%s", strings.ReplaceAll(s.ID, ".", "/"))
+	s.URL = baseURL.String()
+}
+
+func (s *RegisteredModel) GetName() string {
+	return s.Name
+}
+
+func (s *RegisteredModel) GetURL() string {
+	return s.URL
+}

@@ -1,6 +1,12 @@
 package resources
 
 import (
+	"context"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/databricks/databricks-sdk-go"
 	"github.com/databricks/databricks-sdk-go/marshal"
 	"github.com/databricks/databricks-sdk-go/service/catalog"
 )
@@ -16,6 +22,31 @@ type Schema struct {
 	*catalog.CreateSchema
 
 	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
+	URL            string         `json:"url,omitempty" bundle:"internal"`
+}
+
+func (s *Schema) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
+	return false, fmt.Errorf("schema.Exists() is not supported")
+}
+
+func (s *Schema) TerraformResourceName() string {
+	return "databricks_schema"
+}
+
+func (s *Schema) InitializeURL(baseURL url.URL) {
+	if s.ID == "" {
+		return
+	}
+	baseURL.Path = fmt.Sprintf("explore/data/%s", strings.ReplaceAll(s.ID, ".", "/"))
+	s.URL = baseURL.String()
+}
+
+func (s *Schema) GetURL() string {
+	return s.URL
+}
+
+func (s *Schema) GetName() string {
+	return s.Name
 }
 
 func (s *Schema) UnmarshalJSON(b []byte) error {

@@ -3,6 +3,7 @@ package config
 import (
 	"encoding/json"
 	"reflect"
+	"strings"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -61,3 +62,38 @@ func TestCustomMarshallerIsImplemented(t *testing.T) {
 		}, "Resource %s does not have a custom unmarshaller", field.Name)
 	}
 }
+
+func TestResourcesAllResourcesCompleteness(t *testing.T) {
+	r := Resources{}
+	rt := reflect.TypeOf(r)
+
+	// Collect the set of included resource types
+	var types []string
+	for _, group := range r.AllResources() {
+		types = append(types, group.Description.PluralName)
+	}
+
+	for i := 0; i < rt.NumField(); i++ {
+		field := rt.Field(i)
+		jsonTag := field.Tag.Get("json")
+
+		if idx := strings.Index(jsonTag, ","); idx != -1 {
+			jsonTag = jsonTag[:idx]
+		}
+
+		assert.Contains(t, types, jsonTag, "Field %s is missing in AllResources", field.Name)
+	}
+}
+
+func TestSupportedResources(t *testing.T) {
+	// Please add your resource to the SupportedResources() function in resources.go if you add a new resource.
+	actual := SupportedResources()
+
+	typ := reflect.TypeOf(Resources{})
+	for i := 0; i < typ.NumField(); i++ {
+		field := typ.Field(i)
+		jsonTags := strings.Split(field.Tag.Get("json"), ",")
+		pluralName := jsonTags[0]
+		assert.Equal(t, actual[pluralName].PluralName, pluralName)
+	}
+}

@@ -47,7 +47,7 @@ type Workspace struct {
 
 	// Remote workspace base path for deployment state, for artifacts, as synchronization target.
 	// This defaults to "~/.bundle/${bundle.name}/${bundle.target}" where "~" expands to
-	// the current user's home directory in the workspace (e.g. `/Users/jane@doe.com`).
+	// the current user's home directory in the workspace (e.g. `/Workspace/Users/jane@doe.com`).
 	RootPath string `json:"root_path,omitempty"`
 
 	// Remote workspace path to synchronize local files to.

@@ -2,9 +2,12 @@ package files
 
 import (
 	"context"
+	"errors"
 	"fmt"
+	"io/fs"
 
 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/permissions"
 	"github.com/databricks/cli/libs/cmdio"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
@@ -35,6 +38,9 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 
 	b.Files, err = sync.RunOnce(ctx)
 	if err != nil {
+		if errors.Is(err, fs.ErrPermission) {
+			return permissions.ReportPossiblePermissionDenied(ctx, b, b.Config.Workspace.FilePath)
+		}
 		return diag.FromErr(err)
 	}

@@ -3,8 +3,10 @@ package lock
 import (
 	"context"
 	"errors"
+	"io/fs"
 
 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/permissions"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/filer"
 	"github.com/databricks/cli/libs/locker"
@@ -51,12 +53,17 @@ func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
 	if err != nil {
 		log.Errorf(ctx, "Failed to acquire deployment lock: %v", err)
 
+		if errors.Is(err, fs.ErrPermission) {
+			return permissions.ReportPossiblePermissionDenied(ctx, b, b.Config.Workspace.StatePath)
+		}
+
 		notExistsError := filer.NoSuchDirectoryError{}
 		if errors.As(err, &notExistsError) {
 			// If we get a "doesn't exist" error from the API this indicates
 			// we either don't have permissions or the path is invalid.
-			return diag.Errorf("cannot write to deployment root (this can indicate a previous deploy was done with a different identity): %s", b.Config.Workspace.RootPath)
+			return permissions.ReportPossiblePermissionDenied(ctx, b, b.Config.Workspace.StatePath)
		}
 
 		return diag.FromErr(err)
 	}

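The hunks in upload.go and acquire.go share one error-translation pattern: detect a low-level permission failure with errors.Is and replace it with actionable guidance. A minimal sketch under the assumption that the underlying client surfaces permission failures as errors matching fs.ErrPermission (the wrapped error below is synthetic, and the message is illustrative, not the CLI's actual wording):

package main

import (
	"errors"
	"fmt"
	"io/fs"
)

// acquireLock stands in for a filer call; it wraps fs.ErrPermission the way
// a permission-denied API response might be surfaced.
func acquireLock(path string) error {
	return fmt.Errorf("PUT %s: %w", path, fs.ErrPermission)
}

func main() {
	err := acquireLock("/Workspace/Users/someone/.bundle/state") // hypothetical path
	if errors.Is(err, fs.ErrPermission) {
		// Translate the raw error into guidance the user can act on.
		fmt.Println("cannot write to the deployment state path; ask a workspace admin for access to it")
		return
	}
	if err != nil {
		fmt.Println("unexpected error:", err)
	}
}

Because errors.Is walks the whole wrap chain, the check works no matter how many layers of fmt.Errorf("...: %w", err) sit between the API call and the mutator.
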
@@ -10,6 +10,8 @@ import (
 	"github.com/databricks/cli/libs/log"
 )
 
+const MaxStateFileSize = 10 * 1024 * 1024 // 10MB
+
 type statePush struct {
 	filerFactory FilerFactory
 }
@@ -35,6 +37,17 @@ func (s *statePush) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
 	}
 	defer local.Close()
 
+	if !b.Config.Bundle.Force {
+		state, err := local.Stat()
+		if err != nil {
+			return diag.FromErr(err)
+		}
+
+		if state.Size() > MaxStateFileSize {
+			return diag.Errorf("Deployment state file size exceeds the maximum allowed size of %d bytes. Please reduce the number of resources in your bundle, split your bundle into multiple bundles or re-run the command with --force flag.", MaxStateFileSize)
+		}
+	}
+
 	log.Infof(ctx, "Writing local deployment state file to remote state directory")
 	err = f.Write(ctx, DeploymentStateFileName, local, filer.CreateParentDirectories, filer.OverwriteIfExists)
 	if err != nil {

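The guard above simply Stats the already-open local state file before uploading and refuses files over the limit unless --force is set. A self-contained sketch of the same check against a path (the file name is hypothetical, and the error text is illustrative rather than the CLI's exact message):

package main

import (
	"fmt"
	"os"
)

const maxStateFileSize = 10 * 1024 * 1024 // 10MB, mirroring the constant above

// checkStateSize refuses oversized state files unless force is set.
func checkStateSize(path string, force bool) error {
	if force {
		return nil
	}
	info, err := os.Stat(path)
	if err != nil {
		return err
	}
	if info.Size() > maxStateFileSize {
		return fmt.Errorf("state file %s is %d bytes, over the %d byte limit; re-run with --force to override", path, info.Size(), maxStateFileSize)
	}
	return nil
}

func main() {
	if err := checkStateSize("deployment.json", false); err != nil {
		fmt.Println(err)
	}
}

Checking Size() via Stat avoids reading the file at all on the happy path; the upload below streams the same open handle.
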
@@ -4,6 +4,7 @@ import (
 	"context"
 
 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/permissions"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
 	"github.com/hashicorp/terraform-exec/tfexec"
@@ -34,6 +35,10 @@ func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	// Apply terraform according to the computed plan
 	err := tf.Apply(ctx, tfexec.DirOrPlan(b.Plan.Path))
 	if err != nil {
+		diags := permissions.TryExtendTerraformPermissionError(ctx, b, err)
+		if diags != nil {
+			return diags
+		}
 		return diag.Errorf("terraform apply: %v", err)
 	}

@@ -47,6 +47,17 @@ func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
 	}
 	defer local.Close()
 
+	if !b.Config.Bundle.Force {
+		state, err := local.Stat()
+		if err != nil {
+			return diag.FromErr(err)
+		}
+
+		if state.Size() > deploy.MaxStateFileSize {
+			return diag.Errorf("Terraform state file size exceeds the maximum allowed size of %d bytes. Please reduce the number of resources in your bundle, split your bundle into multiple bundles or re-run the command with --force flag", deploy.MaxStateFileSize)
+		}
+	}
+
 	// Upload state file from local cache directory to filer.
 	cmdio.LogString(ctx, "Updating deployment state...")
 	log.Infof(ctx, "Writing local state file to remote state directory")

@@ -3,6 +3,7 @@ package terraform
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"io"
 	"testing"
 
@@ -59,3 +60,29 @@ func TestStatePush(t *testing.T) {
 	diags := bundle.Apply(ctx, b, m)
 	assert.NoError(t, diags.Error())
 }
+
+func TestStatePushLargeState(t *testing.T) {
+	mock := mockfiler.NewMockFiler(t)
+	m := &statePush{
+		identityFiler(mock),
+	}
+
+	ctx := context.Background()
+	b := statePushTestBundle(t)
+
+	largeState := map[string]any{}
+	for i := 0; i < 1000000; i++ {
+		largeState[fmt.Sprintf("field_%d", i)] = i
+	}
+
+	// Write a stale local state file.
+	writeLocalState(t, ctx, b, largeState)
+	diags := bundle.Apply(ctx, b, m)
+	assert.ErrorContains(t, diags.Error(), "Terraform state file size exceeds the maximum allowed size of 10485760 bytes. Please reduce the number of resources in your bundle, split your bundle into multiple bundles or re-run the command with --force flag")
+
+	// Force the write.
+	b = statePushTestBundle(t)
+	b.Config.Bundle.Force = true
+	diags = bundle.Apply(ctx, b, m)
+	assert.NoError(t, diags.Error())
+}

@@ -40,7 +40,7 @@ func (clusterConverter) Convert(ctx context.Context, key string, vin dyn.Value,
 
 	// Configure permissions for this resource.
 	if permissions := convertPermissionsResource(ctx, vin); permissions != nil {
-		permissions.JobId = fmt.Sprintf("${databricks_cluster.%s.id}", key)
+		permissions.ClusterId = fmt.Sprintf("${databricks_cluster.%s.id}", key)
 		out.Permissions["cluster_"+key] = permissions
 	}

@@ -81,7 +81,7 @@ func TestConvertCluster(t *testing.T) {
 
 	// Assert equality on the permissions
 	assert.Equal(t, &schema.ResourcePermissions{
-		JobId: "${databricks_cluster.my_cluster.id}",
+		ClusterId: "${databricks_cluster.my_cluster.id}",
 		AccessControl: []schema.ResourcePermissionsAccessControl{
 			{
 				PermissionLevel: "CAN_RUN",

@@ -8,8 +8,10 @@ import (
 	"reflect"
 
 	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/cli/bundle/config/variable"
 	"github.com/databricks/cli/libs/jsonschema"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
 )
 
 func interpolationPattern(s string) string {
@@ -66,6 +68,31 @@ func addInterpolationPatterns(typ reflect.Type, s jsonschema.Schema) jsonschema.
 	}
 }
 
+func removeJobsFields(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema {
+	switch typ {
+	case reflect.TypeOf(resources.Job{}):
+		// This field has been deprecated in jobs API v2.1 and is always set to
+		// "MULTI_TASK" in the backend. We should not expose it to the user.
+		delete(s.Properties, "format")
+
+		// These fields are only meant to be set by the DABs client (i.e. the CLI)
+		// and thus should not be exposed to the user. These are used to annotate
+		// jobs that were created by DABs.
+		delete(s.Properties, "deployment")
+		delete(s.Properties, "edit_mode")
+
+	case reflect.TypeOf(jobs.GitSource{}):
+		// These fields are readonly and are not meant to be set by the user.
+		delete(s.Properties, "job_source")
+		delete(s.Properties, "git_snapshot")
+
+	default:
+		// Do nothing
+	}
+
+	return s
+}
+
 func main() {
 	if len(os.Args) != 2 {
 		fmt.Println("Usage: go run main.go <output-file>")
@@ -90,6 +117,7 @@ func main() {
 	s, err := jsonschema.FromType(reflect.TypeOf(config.Root{}), []func(reflect.Type, jsonschema.Schema) jsonschema.Schema{
 		p.addDescriptions,
 		p.addEnums,
+		removeJobsFields,
 		addInterpolationPatterns,
 	})
 	if err != nil {

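removeJobsFields slots into a chain of schema transforms: the generator pipes every type's schema through a list of func(reflect.Type, Schema) Schema hooks, each of which can prune or annotate properties for specific Go types. A minimal sketch of that chain idea, with a stand-in Schema type and a hypothetical job struct (not the CLI's jsonschema package):

package main

import (
	"fmt"
	"reflect"
)

// Schema is a minimal stand-in for a JSON schema node.
type Schema struct {
	Properties map[string]Schema
}

// transform mirrors the hook signature used by the generator above.
type transform func(reflect.Type, Schema) Schema

// job is a hypothetical config type whose schema we want to prune.
type job struct {
	Format string `json:"format"`
	Name   string `json:"name"`
}

// dropDeprecated removes fields that users should not set.
func dropDeprecated(typ reflect.Type, s Schema) Schema {
	if typ == reflect.TypeOf(job{}) {
		delete(s.Properties, "format") // deprecated; backend always sets it
	}
	return s
}

// apply runs every transform in order, threading the schema through.
func apply(typ reflect.Type, s Schema, fns []transform) Schema {
	for _, fn := range fns {
		s = fn(typ, s)
	}
	return s
}

func main() {
	s := Schema{Properties: map[string]Schema{"format": {}, "name": {}}}
	s = apply(reflect.TypeOf(job{}), s, []transform{dropDeprecated})
	fmt.Println(len(s.Properties)) // 1: only "name" remains
}

The testdata files that follow exercise exactly these pruned fields (format, deployment, edit_mode, git_snapshot, job_source), so the generated schema should reject each of them.
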
@@ -0,0 +1,4 @@
+resources:
+  jobs:
+    foo:
+      format: SINGLE_TASK

@@ -0,0 +1,6 @@
+resources:
+  jobs:
+    foo:
+      deployment:
+        kind: BUNDLE
+        metadata_file_path: /a/b/c

@@ -0,0 +1,6 @@
+targets:
+  foo:
+    resources:
+      jobs:
+        bar:
+          edit_mode: whatever

@@ -0,0 +1,8 @@
+resources:
+  jobs:
+    foo:
+      git_source:
+        git_provider: GITHUB
+        git_url: www.whatever.com
+        git_snapshot:
+          used_commit: abcdef

@@ -0,0 +1,9 @@
+resources:
+  jobs:
+    foo:
+      git_source:
+        git_provider: GITHUB
+        git_url: www.whatever.com
+        job_source:
+          import_from_git_branch: master
+          job_config_path: def

@@ -32,7 +32,6 @@ resources:
       name: myjob
       continuous:
         pause_status: PAUSED
-      edit_mode: EDITABLE
       max_concurrent_runs: 10
       description: "my job description"
      email_notifications:
@@ -43,10 +42,12 @@ resources:
           dependencies:
             - python=3.7
           client: "myclient"
-      format: MULTI_TASK
       tags:
         foo: bar
         bar: baz
+      git_source:
+        git_provider: gitHub
+        git_url: www.github.com/a/b
       tasks:
         - task_key: mytask
           notebook_task:

@@ -1,3 +1,3 @@
 package schema
 
-const ProviderVersion = "1.52.0"
+const ProviderVersion = "1.53.0"

@@ -10,6 +10,7 @@ type DataSourceCurrentMetastoreMetastoreInfo struct {
 	DeltaSharingOrganizationName                string `json:"delta_sharing_organization_name,omitempty"`
 	DeltaSharingRecipientTokenLifetimeInSeconds int    `json:"delta_sharing_recipient_token_lifetime_in_seconds,omitempty"`
 	DeltaSharingScope                           string `json:"delta_sharing_scope,omitempty"`
+	ExternalAccessEnabled                       bool   `json:"external_access_enabled,omitempty"`
 	GlobalMetastoreId                           string `json:"global_metastore_id,omitempty"`
 	MetastoreId                                 string `json:"metastore_id,omitempty"`
 	Name                                        string `json:"name,omitempty"`

@@ -10,6 +10,7 @@ type DataSourceMetastoreMetastoreInfo struct {
 	DeltaSharingOrganizationName                string `json:"delta_sharing_organization_name,omitempty"`
 	DeltaSharingRecipientTokenLifetimeInSeconds int    `json:"delta_sharing_recipient_token_lifetime_in_seconds,omitempty"`
 	DeltaSharingScope                           string `json:"delta_sharing_scope,omitempty"`
+	ExternalAccessEnabled                       bool   `json:"external_access_enabled,omitempty"`
 	GlobalMetastoreId                           string `json:"global_metastore_id,omitempty"`
 	MetastoreId                                 string `json:"metastore_id,omitempty"`
 	Name                                        string `json:"name,omitempty"`

@@ -0,0 +1,8 @@
+// Generated from Databricks Terraform provider schema. DO NOT EDIT.
+
+package schema
+
+type DataSourceMlflowModels struct {
+	Id    string   `json:"id,omitempty"`
+	Names []string `json:"names,omitempty"`
+}

@@ -30,6 +30,7 @@ type DataSources struct {
 	Metastores       map[string]any `json:"databricks_metastores,omitempty"`
 	MlflowExperiment map[string]any `json:"databricks_mlflow_experiment,omitempty"`
 	MlflowModel      map[string]any `json:"databricks_mlflow_model,omitempty"`
+	MlflowModels     map[string]any `json:"databricks_mlflow_models,omitempty"`
 	MwsCredentials   map[string]any `json:"databricks_mws_credentials,omitempty"`
 	MwsWorkspaces    map[string]any `json:"databricks_mws_workspaces,omitempty"`
 	NodeType         map[string]any `json:"databricks_node_type,omitempty"`
@@ -85,6 +86,7 @@ func NewDataSources() *DataSources {
 		Metastores:       make(map[string]any),
 		MlflowExperiment: make(map[string]any),
 		MlflowModel:      make(map[string]any),
+		MlflowModels:     make(map[string]any),
 		MwsCredentials:   make(map[string]any),
 		MwsWorkspaces:    make(map[string]any),
 		NodeType:         make(map[string]any),

@@ -0,0 +1,49 @@
+// Generated from Databricks Terraform provider schema. DO NOT EDIT.
+
+package schema
+
+type ResourceBudgetAlertConfigurationsActionConfigurations struct {
+	ActionConfigurationId string `json:"action_configuration_id,omitempty"`
+	ActionType            string `json:"action_type,omitempty"`
+	Target                string `json:"target,omitempty"`
+}
+
+type ResourceBudgetAlertConfigurations struct {
+	AlertConfigurationId string                                                  `json:"alert_configuration_id,omitempty"`
+	QuantityThreshold    string                                                  `json:"quantity_threshold,omitempty"`
+	QuantityType         string                                                  `json:"quantity_type,omitempty"`
+	TimePeriod           string                                                  `json:"time_period,omitempty"`
+	TriggerType          string                                                  `json:"trigger_type,omitempty"`
+	ActionConfigurations []ResourceBudgetAlertConfigurationsActionConfigurations `json:"action_configurations,omitempty"`
+}
+
+type ResourceBudgetFilterTagsValue struct {
+	Operator string   `json:"operator,omitempty"`
+	Values   []string `json:"values,omitempty"`
+}
+
+type ResourceBudgetFilterTags struct {
+	Key   string                         `json:"key,omitempty"`
+	Value *ResourceBudgetFilterTagsValue `json:"value,omitempty"`
+}
+
+type ResourceBudgetFilterWorkspaceId struct {
+	Operator string `json:"operator,omitempty"`
+	Values   []int  `json:"values,omitempty"`
+}
+
+type ResourceBudgetFilter struct {
+	Tags        []ResourceBudgetFilterTags       `json:"tags,omitempty"`
+	WorkspaceId *ResourceBudgetFilterWorkspaceId `json:"workspace_id,omitempty"`
+}
+
+type ResourceBudget struct {
+	AccountId             string                              `json:"account_id,omitempty"`
+	BudgetConfigurationId string                              `json:"budget_configuration_id,omitempty"`
+	CreateTime            int                                 `json:"create_time,omitempty"`
+	DisplayName           string                              `json:"display_name,omitempty"`
+	Id                    string                              `json:"id,omitempty"`
+	UpdateTime            int                                 `json:"update_time,omitempty"`
+	AlertConfigurations   []ResourceBudgetAlertConfigurations `json:"alert_configurations,omitempty"`
+	Filter                *ResourceBudgetFilter               `json:"filter,omitempty"`
+}

@@ -2,6 +2,57 @@
 
 package schema
 
+type ResourceModelServingAiGatewayGuardrailsInputPii struct {
+	Behavior string `json:"behavior"`
+}
+
+type ResourceModelServingAiGatewayGuardrailsInput struct {
+	InvalidKeywords []string                                         `json:"invalid_keywords,omitempty"`
+	Safety          bool                                             `json:"safety,omitempty"`
+	ValidTopics     []string                                         `json:"valid_topics,omitempty"`
+	Pii             *ResourceModelServingAiGatewayGuardrailsInputPii `json:"pii,omitempty"`
+}
+
+type ResourceModelServingAiGatewayGuardrailsOutputPii struct {
+	Behavior string `json:"behavior"`
+}
+
+type ResourceModelServingAiGatewayGuardrailsOutput struct {
+	InvalidKeywords []string                                          `json:"invalid_keywords,omitempty"`
+	Safety          bool                                              `json:"safety,omitempty"`
+	ValidTopics     []string                                          `json:"valid_topics,omitempty"`
+	Pii             *ResourceModelServingAiGatewayGuardrailsOutputPii `json:"pii,omitempty"`
+}
+
+type ResourceModelServingAiGatewayGuardrails struct {
+	Input  *ResourceModelServingAiGatewayGuardrailsInput  `json:"input,omitempty"`
+	Output *ResourceModelServingAiGatewayGuardrailsOutput `json:"output,omitempty"`
+}
+
+type ResourceModelServingAiGatewayInferenceTableConfig struct {
+	CatalogName     string `json:"catalog_name,omitempty"`
+	Enabled         bool   `json:"enabled,omitempty"`
+	SchemaName      string `json:"schema_name,omitempty"`
+	TableNamePrefix string `json:"table_name_prefix,omitempty"`
+}
+
+type ResourceModelServingAiGatewayRateLimits struct {
+	Calls         int    `json:"calls"`
+	Key           string `json:"key,omitempty"`
+	RenewalPeriod string `json:"renewal_period"`
+}
+
+type ResourceModelServingAiGatewayUsageTrackingConfig struct {
+	Enabled bool `json:"enabled,omitempty"`
+}
+
+type ResourceModelServingAiGateway struct {
+	Guardrails           *ResourceModelServingAiGatewayGuardrails           `json:"guardrails,omitempty"`
+	InferenceTableConfig *ResourceModelServingAiGatewayInferenceTableConfig `json:"inference_table_config,omitempty"`
+	RateLimits           []ResourceModelServingAiGatewayRateLimits          `json:"rate_limits,omitempty"`
+	UsageTrackingConfig  *ResourceModelServingAiGatewayUsageTrackingConfig  `json:"usage_tracking_config,omitempty"`
+}
+
 type ResourceModelServingConfigAutoCaptureConfig struct {
 	CatalogName string `json:"catalog_name,omitempty"`
 	Enabled     bool   `json:"enabled,omitempty"`
@@ -139,6 +190,7 @@ type ResourceModelServing struct {
 	Name              string                           `json:"name"`
 	RouteOptimized    bool                             `json:"route_optimized,omitempty"`
 	ServingEndpointId string                           `json:"serving_endpoint_id,omitempty"`
+	AiGateway         *ResourceModelServingAiGateway   `json:"ai_gateway,omitempty"`
 	Config            *ResourceModelServingConfig      `json:"config,omitempty"`
 	RateLimits        []ResourceModelServingRateLimits `json:"rate_limits,omitempty"`
 	Tags              []ResourceModelServingTags       `json:"tags,omitempty"`

@@ -4,7 +4,7 @@ package schema
 
 type ResourcePermissionsAccessControl struct {
 	GroupName            string `json:"group_name,omitempty"`
-	PermissionLevel      string `json:"permission_level"`
+	PermissionLevel      string `json:"permission_level,omitempty"`
 	ServicePrincipalName string `json:"service_principal_name,omitempty"`
 	UserName             string `json:"user_name,omitempty"`
 }

@@ -238,6 +238,7 @@ type ResourcePipelineTrigger struct {
 
 type ResourcePipeline struct {
 	AllowDuplicateNames bool   `json:"allow_duplicate_names,omitempty"`
+	BudgetPolicyId      string `json:"budget_policy_id,omitempty"`
 	Catalog             string `json:"catalog,omitempty"`
 	Cause               string `json:"cause,omitempty"`
 	Channel             string `json:"channel,omitempty"`
@@ -254,6 +255,7 @@ type ResourcePipeline struct {
 	Name          string `json:"name,omitempty"`
 	Photon        bool   `json:"photon,omitempty"`
 	RunAsUserName string `json:"run_as_user_name,omitempty"`
+	Schema        string `json:"schema,omitempty"`
 	Serverless    bool   `json:"serverless,omitempty"`
 	State         string `json:"state,omitempty"`
 	Storage       string `json:"storage,omitempty"`

@@ -4,9 +4,11 @@ package schema

 type ResourceSqlTableColumn struct {
 	Comment string `json:"comment,omitempty"`
+	Identity string `json:"identity,omitempty"`
 	Name string `json:"name"`
 	Nullable bool `json:"nullable,omitempty"`
 	Type string `json:"type,omitempty"`
+	TypeJson string `json:"type_json,omitempty"`
 }

 type ResourceSqlTable struct {
@@ -10,6 +10,7 @@ type Resources struct {
 	AzureAdlsGen1Mount map[string]any `json:"databricks_azure_adls_gen1_mount,omitempty"`
 	AzureAdlsGen2Mount map[string]any `json:"databricks_azure_adls_gen2_mount,omitempty"`
 	AzureBlobMount map[string]any `json:"databricks_azure_blob_mount,omitempty"`
+	Budget map[string]any `json:"databricks_budget,omitempty"`
 	Catalog map[string]any `json:"databricks_catalog,omitempty"`
 	CatalogWorkspaceBinding map[string]any `json:"databricks_catalog_workspace_binding,omitempty"`
 	Cluster map[string]any `json:"databricks_cluster,omitempty"`

@@ -112,6 +113,7 @@ func NewResources() *Resources {
 	AzureAdlsGen1Mount: make(map[string]any),
 	AzureAdlsGen2Mount: make(map[string]any),
 	AzureBlobMount: make(map[string]any),
+	Budget: make(map[string]any),
 	Catalog: make(map[string]any),
 	CatalogWorkspaceBinding: make(map[string]any),
 	Cluster: make(map[string]any),
@@ -21,7 +21,7 @@ type Root struct {

 const ProviderHost = "registry.terraform.io"
 const ProviderSource = "databricks/databricks"
-const ProviderVersion = "1.52.0"
+const ProviderVersion = "1.53.0"

 func NewRoot() *Root {
 	return &Root{
@@ -0,0 +1,110 @@
+package permissions
+
+import (
+	"context"
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/cli/libs/set"
+)
+
+type permissionDiagnostics struct{}
+
+func PermissionDiagnostics() bundle.Mutator {
+	return &permissionDiagnostics{}
+}
+
+func (m *permissionDiagnostics) Name() string {
+	return "CheckPermissions"
+}
+
+func (m *permissionDiagnostics) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	if len(b.Config.Permissions) == 0 {
+		// Only warn if there is an explicit top-level permissions section.
+		return nil
+	}
+
+	canManageBundle, _ := analyzeBundlePermissions(b)
+	if canManageBundle {
+		return nil
+	}
+
+	return diag.Diagnostics{{
+		Severity:  diag.Warning,
+		Summary:   fmt.Sprintf("permissions section should include %s or one of their groups with CAN_MANAGE permissions", b.Config.Workspace.CurrentUser.UserName),
+		Locations: []dyn.Location{b.Config.GetLocation("permissions")},
+		ID:        diag.PermissionNotIncluded,
+	}}
+}
+
+// analyzeBundlePermissions analyzes the top-level permissions of the bundle.
+// This permission set is important since it determines the permissions of the
+// target workspace folder.
+//
+// Returns:
+// - isManager: true if the current user can manage the bundle resources.
+// - assistance: advice on who to contact about managing this project.
+func analyzeBundlePermissions(b *bundle.Bundle) (bool, string) {
+	canManageBundle := false
+	otherManagers := set.NewSet[string]()
+	if b.Config.RunAs != nil && b.Config.RunAs.UserName != "" && b.Config.RunAs.UserName != b.Config.Workspace.CurrentUser.UserName {
+		// The run_as user is another human that could be contacted
+		// about this bundle.
+		otherManagers.Add(b.Config.RunAs.UserName)
+	}
+
+	currentUser := b.Config.Workspace.CurrentUser.UserName
+	targetPermissions := b.Config.Permissions
+	for _, p := range targetPermissions {
+		if p.Level != CAN_MANAGE {
+			continue
+		}
+
+		if p.UserName == currentUser || p.ServicePrincipalName == currentUser {
+			canManageBundle = true
+			continue
+		}
+
+		if isGroupOfCurrentUser(b, p.GroupName) {
+			canManageBundle = true
+			continue
+		}
+
+		// Permission doesn't apply to current user; add to otherManagers.
+		otherManager := p.UserName
+		if otherManager == "" {
+			otherManager = p.GroupName
+		}
+		if otherManager == "" {
+			// Skip service principals.
+			continue
+		}
+		otherManagers.Add(otherManager)
+	}
+
+	assistance := "For assistance, contact the owners of this project."
+	if otherManagers.Size() > 0 {
+		list := otherManagers.Values()
+		sort.Strings(list)
+		assistance = fmt.Sprintf(
+			"For assistance, users or groups with appropriate permissions may include: %s.",
+			strings.Join(list, ", "),
+		)
+	}
+	return canManageBundle, assistance
+}
+
+func isGroupOfCurrentUser(b *bundle.Bundle, groupName string) bool {
+	currentUserGroups := b.Config.Workspace.CurrentUser.User.Groups
+
+	for _, g := range currentUserGroups {
+		if g.Display == groupName {
+			return true
+		}
+	}
+	return false
+}
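For reference, the mutator is exercised directly in the tests below; a minimal sketch, assuming a populated *bundle.Bundle named b:

    // Sketch: run the diagnostics mutator and inspect the result.
    diags := permissions.PermissionDiagnostics().Apply(context.Background(), b)
    if err := diags.Error(); err != nil {
        // Error() is only non-nil for Severity == diag.Error; this mutator
        // emits at most a warning, so err stays nil here.
    }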
@@ -0,0 +1,52 @@
+package permissions_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/bundle/permissions"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/databricks-sdk-go/service/iam"
+	"github.com/stretchr/testify/require"
+)
+
+func TestPermissionDiagnosticsApplySuccess(t *testing.T) {
+	b := mockBundle([]resources.Permission{
+		{Level: "CAN_MANAGE", UserName: "testuser@databricks.com"},
+	})
+
+	diags := permissions.PermissionDiagnostics().Apply(context.Background(), b)
+	require.NoError(t, diags.Error())
+}
+
+func TestPermissionDiagnosticsApplyFail(t *testing.T) {
+	b := mockBundle([]resources.Permission{
+		{Level: "CAN_VIEW", UserName: "testuser@databricks.com"},
+	})
+
+	diags := permissions.PermissionDiagnostics().Apply(context.Background(), b)
+	require.Equal(t, diags[0].Severity, diag.Warning)
+	require.Contains(t, diags[0].Summary, "permissions section should include testuser@databricks.com or one of their groups with CAN_MANAGE permissions")
+}
+
+func mockBundle(permissions []resources.Permission) *bundle.Bundle {
+	return &bundle.Bundle{
+		Config: config.Root{
+			Workspace: config.Workspace{
+				CurrentUser: &config.User{
+					User: &iam.User{
+						UserName:    "testuser@databricks.com",
+						DisplayName: "Test User",
+						Groups: []iam.ComplexValue{
+							{Display: "testgroup"},
+						},
+					},
+				},
+			},
+			Permissions: permissions,
+		},
+	}
+}
@@ -0,0 +1,52 @@
+package permissions
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/iamutil"
+	"github.com/databricks/cli/libs/log"
+)
+
+// ReportPossiblePermissionDenied generates a diagnostic message when a permission denied error is encountered.
+//
+// Note that since the workspace API doesn't always distinguish between permission denied and path errors,
+// we must treat this as a "possible permission error". See acquire.go for more about this.
+func ReportPossiblePermissionDenied(ctx context.Context, b *bundle.Bundle, path string) diag.Diagnostics {
+	log.Errorf(ctx, "Failed to update, encountered possible permission error: %v", path)
+
+	me := b.Config.Workspace.CurrentUser.User
+	userName := me.UserName
+	if iamutil.IsServicePrincipal(me) {
+		userName = me.DisplayName
+	}
+	canManageBundle, assistance := analyzeBundlePermissions(b)
+
+	if !canManageBundle {
+		return diag.Diagnostics{{
+			Summary: fmt.Sprintf("unable to deploy to %s as %s.\n"+
+				"Please make sure the current user or one of their groups is listed under the permissions of this bundle.\n"+
+				"%s\n"+
+				"They may need to redeploy the bundle to apply the new permissions.\n"+
+				"Please refer to https://docs.databricks.com/dev-tools/bundles/permissions.html for more on managing permissions.",
+				path, userName, assistance),
+			Severity: diag.Error,
+			ID:       diag.PathPermissionDenied,
+		}}
+	}
+
+	// According to databricks.yml, the current user has the right permissions.
+	// But we're still seeing permission errors. So someone else will need
+	// to redeploy the bundle with the right set of permissions.
+	return diag.Diagnostics{{
+		Summary: fmt.Sprintf("unable to deploy to %s as %s. Cannot apply local deployment permissions.\n"+
+			"%s\n"+
+			"They can redeploy the project to apply the latest set of permissions.\n"+
+			"Please refer to https://docs.databricks.com/dev-tools/bundles/permissions.html for more on managing permissions.",
+			path, userName, assistance),
+		Severity: diag.Error,
+		ID:       diag.CannotChangePathPermissions,
+	}}
+}
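A minimal sketch of how a caller might surface this report, mirroring the tests below; b is a populated *bundle.Bundle and the path value is illustrative only:

    // Sketch: convert a possible permission failure into a user-facing error.
    diags := permissions.ReportPossiblePermissionDenied(ctx, b, "testpath")
    if err := diags.Error(); err != nil {
        fmt.Println(err) // "EPERM1: unable to deploy to ..." or "EPERM3: ..."
    }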
@@ -0,0 +1,76 @@
+package permissions_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/bundle/permissions"
+	"github.com/stretchr/testify/require"
+)
+
+func TestPermissionsReportPermissionDeniedWithGroup(t *testing.T) {
+	b := mockBundle([]resources.Permission{
+		{Level: "CAN_MANAGE", GroupName: "testgroup"},
+	})
+
+	diags := permissions.ReportPossiblePermissionDenied(context.Background(), b, "testpath")
+	expected := "EPERM3: unable to deploy to testpath as testuser@databricks.com. Cannot apply local deployment permissions.\n" +
+		"For assistance, contact the owners of this project.\n" +
+		"They can redeploy the project to apply the latest set of permissions.\n" +
+		"Please refer to https://docs.databricks.com/dev-tools/bundles/permissions.html for more on managing permissions."
+	require.ErrorContains(t, diags.Error(), expected)
+}
+
+func TestPermissionsReportPermissionDeniedWithOtherGroup(t *testing.T) {
+	b := mockBundle([]resources.Permission{
+		{Level: "CAN_MANAGE", GroupName: "othergroup"},
+	})
+
+	diags := permissions.ReportPossiblePermissionDenied(context.Background(), b, "testpath")
+	expected := "EPERM1: unable to deploy to testpath as testuser@databricks.com.\n" +
+		"Please make sure the current user or one of their groups is listed under the permissions of this bundle.\n" +
+		"For assistance, users or groups with appropriate permissions may include: othergroup.\n" +
+		"They may need to redeploy the bundle to apply the new permissions.\n" +
+		"Please refer to https://docs.databricks.com/dev-tools/bundles/permissions.html for more on managing permissions."
+	require.ErrorContains(t, diags.Error(), expected)
+}
+
+func TestPermissionsReportPermissionDeniedWithoutPermission(t *testing.T) {
+	b := mockBundle([]resources.Permission{
+		{Level: "CAN_VIEW", UserName: "testuser@databricks.com"},
+	})
+
+	diags := permissions.ReportPossiblePermissionDenied(context.Background(), b, "testpath")
+	expected := "EPERM1: unable to deploy to testpath as testuser@databricks.com.\n" +
+		"Please make sure the current user or one of their groups is listed under the permissions of this bundle.\n" +
+		"For assistance, contact the owners of this project.\n" +
+		"They may need to redeploy the bundle to apply the new permissions.\n" +
+		"Please refer to https://docs.databricks.com/dev-tools/bundles/permissions.html for more on managing permissions."
+	require.ErrorContains(t, diags.Error(), expected)
+}
+
+func TestPermissionsReportPermissionDeniedNilPermission(t *testing.T) {
+	b := mockBundle(nil)
+
+	diags := permissions.ReportPossiblePermissionDenied(context.Background(), b, "testpath")
+	expected := "EPERM1: unable to deploy to testpath as testuser@databricks.com.\n" +
+		"Please make sure the current user or one of their groups is listed under the permissions of this bundle.\n" +
+		"For assistance, contact the owners of this project.\n" +
+		"They may need to redeploy the bundle to apply the new permissions.\n" +
+		"Please refer to https://docs.databricks.com/dev-tools/bundles/permissions.html for more on managing permissions"
+	require.ErrorContains(t, diags.Error(), expected)
+}
+
+func TestPermissionsReportFindOtherOwners(t *testing.T) {
+	b := mockBundle([]resources.Permission{
+		{Level: "CAN_MANAGE", GroupName: "testgroup"},
+		{Level: "CAN_MANAGE", UserName: "alice@databricks.com"},
+	})
+
+	diags := permissions.ReportPossiblePermissionDenied(context.Background(), b, "testpath")
+	require.ErrorContains(t, diags.Error(), "EPERM3: unable to deploy to testpath as testuser@databricks.com. Cannot apply local deployment permissions.\n"+
+		"For assistance, users or groups with appropriate permissions may include: alice@databricks.com.\n"+
+		"They can redeploy the project to apply the latest set of permissions.\n"+
+		"Please refer to https://docs.databricks.com/dev-tools/bundles/permissions.html for more on managing permissions.")
+}
@@ -0,0 +1,47 @@
+package permissions
+
+import (
+	"context"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/log"
+)
+
+func TryExtendTerraformPermissionError(ctx context.Context, b *bundle.Bundle, err error) diag.Diagnostics {
+	_, assistance := analyzeBundlePermissions(b)
+
+	// In a best-effort attempt to provide actionable error messages, we match
+	// against a few specific error messages that come from the Jobs and Pipelines API.
+	// For matching errors we provide a more specific error message that includes
+	// details on how to resolve the issue.
+	if !strings.Contains(err.Error(), "cannot update permissions") &&
+		!strings.Contains(err.Error(), "permissions on pipeline") &&
+		!strings.Contains(err.Error(), "cannot read permissions") &&
+		!strings.Contains(err.Error(), "cannot set run_as to user") {
+		return nil
+	}
+
+	log.Errorf(ctx, "Terraform error during deployment: %v", err.Error())
+
+	// Best-effort attempt to extract the resource name from the error message.
+	re := regexp.MustCompile(`databricks_(\w*)\.(\w*)`)
+	match := re.FindStringSubmatch(err.Error())
+	resource := "resource"
+	if len(match) > 1 {
+		resource = match[2]
+	}
+
+	return diag.Diagnostics{{
+		Summary: fmt.Sprintf("permission denied creating or updating %s.\n"+
+			"%s\n"+
+			"They can redeploy the project to apply the latest set of permissions.\n"+
+			"Please refer to https://docs.databricks.com/dev-tools/bundles/permissions.html for more on managing permissions.",
+			resource, assistance),
+		Severity: diag.Error,
+		ID:       diag.ResourcePermissionDenied,
+	}}
+}
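The resource-name extraction above is a plain regexp over Terraform's error text; a self-contained sketch of the same matching logic:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        re := regexp.MustCompile(`databricks_(\w*)\.(\w*)`)
        msg := " with databricks_pipeline.my_project_pipeline,"
        // match[1] is the resource type ("pipeline"), match[2] the resource key.
        match := re.FindStringSubmatch(msg)
        if len(match) > 1 {
            fmt.Println(match[2]) // my_project_pipeline
        }
    }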
@@ -0,0 +1,97 @@
+package permissions_test
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/bundle/permissions"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+	"github.com/stretchr/testify/require"
+)
+
+func TestTryExtendTerraformPermissionError1(t *testing.T) {
+	ctx := context.Background()
+	b := mockBundle([]resources.Permission{
+		{Level: "CAN_MANAGE", UserName: "alice@databricks.com"},
+	})
+	err := permissions.TryExtendTerraformPermissionError(ctx, b, errors.New("Error: terraform apply: exit status 1\n"+
+		"\n"+
+		"Error: cannot update permissions: ...\n"+
+		"\n"+
+		"  with databricks_pipeline.my_project_pipeline,\n"+
+		"  on bundle.tf.json line 39, in resource.databricks_pipeline.my_project_pipeline:\n"+
+		"  39: }")).Error()
+
+	expected := "EPERM2: permission denied creating or updating my_project_pipeline.\n" +
+		"For assistance, users or groups with appropriate permissions may include: alice@databricks.com.\n" +
+		"They can redeploy the project to apply the latest set of permissions.\n" +
+		"Please refer to https://docs.databricks.com/dev-tools/bundles/permissions.html for more on managing permissions"
+
+	require.ErrorContains(t, err, expected)
+}
+
+func TestTryExtendTerraformPermissionError2(t *testing.T) {
+	ctx := context.Background()
+	b := mockBundle([]resources.Permission{
+		{Level: "CAN_MANAGE", UserName: "alice@databricks.com"},
+		{Level: "CAN_MANAGE", UserName: "bob@databricks.com"},
+	})
+	err := permissions.TryExtendTerraformPermissionError(ctx, b, errors.New("Error: terraform apply: exit status 1\n"+
+		"\n"+
+		"Error: cannot read pipeline: User xyz does not have View permissions on pipeline 4521dbb6-42aa-418c-b94d-b5f4859a3454.\n"+
+		"\n"+
+		"  with databricks_pipeline.my_project_pipeline,\n"+
+		"  on bundle.tf.json line 39, in resource.databricks_pipeline.my_project_pipeline:\n"+
+		"  39: }")).Error()
+
+	expected := "EPERM2: permission denied creating or updating my_project_pipeline.\n" +
+		"For assistance, users or groups with appropriate permissions may include: alice@databricks.com, bob@databricks.com.\n" +
+		"They can redeploy the project to apply the latest set of permissions.\n" +
+		"Please refer to https://docs.databricks.com/dev-tools/bundles/permissions.html for more on managing permissions."
+	require.ErrorContains(t, err, expected)
+}
+
+func TestTryExtendTerraformPermissionError3(t *testing.T) {
+	ctx := context.Background()
+	b := mockBundle([]resources.Permission{
+		{Level: "CAN_MANAGE", UserName: "testuser@databricks.com"},
+	})
+	err := permissions.TryExtendTerraformPermissionError(ctx, b, errors.New("Error: terraform apply: exit status 1\n"+
+		"\n"+
+		"Error: cannot read permissions: 1706906c-c0a2-4c25-9f57-3a7aa3cb8b90 does not have Owner permissions on Job with ID: ElasticJobId(28263044278868). Please contact the owner or an administrator for access.\n"+
+		"\n"+
+		"  with databricks_pipeline.my_project_pipeline,\n"+
+		"  on bundle.tf.json line 39, in resource.databricks_pipeline.my_project_pipeline:\n"+
+		"  39: }")).Error()
+
+	expected := "EPERM2: permission denied creating or updating my_project_pipeline.\n" +
+		"For assistance, contact the owners of this project.\n" +
+		"They can redeploy the project to apply the latest set of permissions.\n" +
+		"Please refer to https://docs.databricks.com/dev-tools/bundles/permissions.html for more on managing permissions."
+	require.ErrorContains(t, err, expected)
+}
+
+func TestTryExtendTerraformPermissionErrorNotOwner(t *testing.T) {
+	ctx := context.Background()
+	b := mockBundle([]resources.Permission{
+		{Level: "CAN_MANAGE", GroupName: "data_team@databricks.com"},
+	})
+	b.Config.RunAs = &jobs.JobRunAs{
+		UserName: "testuser@databricks.com",
+	}
+	err := permissions.TryExtendTerraformPermissionError(ctx, b, errors.New("Error: terraform apply: exit status 1\n"+
+		"\n"+
+		"Error: cannot read pipeline: User xyz does not have View permissions on pipeline 4521dbb6-42aa-418c-b94d-b5f4859a3454.\n"+
+		"\n"+
+		"  with databricks_pipeline.my_project_pipeline,\n"+
+		"  on bundle.tf.json line 39, in resource.databricks_pipeline.my_project_pipeline:\n"+
+		"  39: }")).Error()
+
+	expected := "EPERM2: permission denied creating or updating my_project_pipeline.\n" +
+		"For assistance, users or groups with appropriate permissions may include: data_team@databricks.com.\n" +
+		"They can redeploy the project to apply the latest set of permissions.\n" +
+		"Please refer to https://docs.databricks.com/dev-tools/bundles/permissions.html for more on managing permissions."
+	require.ErrorContains(t, err, expected)
+}
@@ -39,9 +39,16 @@ func Initialize() bundle.Mutator {
 		mutator.MergePipelineClusters(),
 		mutator.InitializeWorkspaceClient(),
 		mutator.PopulateCurrentUser(),
+
 		mutator.DefineDefaultWorkspaceRoot(),
 		mutator.ExpandWorkspaceRoot(),
 		mutator.DefineDefaultWorkspacePaths(),
+		mutator.PrependWorkspacePrefix(),
+
+		// This mutator needs to be run before variable interpolation because it
+		// searches for strings with variable references in them.
+		mutator.RewriteWorkspacePrefix(),
+
 		mutator.SetVariables(),
 		// Intentionally placed before ResolveVariableReferencesInLookup, ResolveResourceReferences,
 		// ResolveVariableReferencesInComplexVariables and ResolveVariableReferences.

@@ -55,6 +62,8 @@ func Initialize() bundle.Mutator {
 			"workspace",
 			"variables",
 		),
+		// Provide permission config errors & warnings after initializing all variables.
+		permissions.PermissionDiagnostics(),
 		mutator.SetRunAs(),
 		mutator.OverrideCompute(),
 		mutator.ConfigureDashboardDefaults(),
@@ -1,13 +1,16 @@
 package render

 import (
+	"context"
 	"fmt"
 	"io"
 	"path/filepath"
+	"sort"
 	"strings"
 	"text/template"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/cmdio"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/databricks-sdk-go/service/iam"
 	"github.com/fatih/color"

@@ -28,35 +31,7 @@ var renderFuncMap = template.FuncMap{
 	},
 }

-const errorTemplate = `{{ "Error" | red }}: {{ .Summary }}
-{{- range $index, $element := .Paths }}
-{{ if eq $index 0 }}at {{else}}   {{ end}}{{ $element.String | green }}
-{{- end }}
-{{- range $index, $element := .Locations }}
-{{ if eq $index 0 }}in {{else}}   {{ end}}{{ $element.String | cyan }}
-{{- end }}
-{{- if .Detail }}
-
-{{ .Detail }}
-{{- end }}
-
-`
-
-const warningTemplate = `{{ "Warning" | yellow }}: {{ .Summary }}
-{{- range $index, $element := .Paths }}
-{{ if eq $index 0 }}at {{else}}   {{ end}}{{ $element.String | green }}
-{{- end }}
-{{- range $index, $element := .Locations }}
-{{ if eq $index 0 }}in {{else}}   {{ end}}{{ $element.String | cyan }}
-{{- end }}
-{{- if .Detail }}
-
-{{ .Detail }}
-{{- end }}
-
-`
-
-const summaryTemplate = `{{- if .Name -}}
+const summaryHeaderTemplate = `{{- if .Name -}}
 Name: {{ .Name | bold }}
 {{- if .Target }}
 Target: {{ .Target | bold }}

@@ -73,12 +48,30 @@ Workspace:
 	Path: {{ .Path | bold }}
 {{- end }}
 {{- end }}
-{{ end -}}
-{{ .Trailer }}
+{{ end -}}`
+
+const resourcesTemplate = `Resources:
+{{- range . }}
+  {{ .GroupName }}:
+  {{- range .Resources }}
+    {{ .Key | bold }}:
+      Name: {{ .Name }}
+      URL:  {{ if .URL }}{{ .URL | cyan }}{{ else }}{{ "(not deployed)" | cyan }}{{ end }}
+  {{- end }}
+{{- end }}
 `

+type ResourceGroup struct {
+	GroupName string
+	Resources []ResourceInfo
+}
+
+type ResourceInfo struct {
+	Key  string
+	Name string
+	URL  string
+}
+
 func pluralize(n int, singular, plural string) string {
 	if n == 1 {
 		return fmt.Sprintf("%d %s", n, singular)

@@ -94,16 +87,27 @@ func buildTrailer(diags diag.Diagnostics) string {
 	if warnings := len(diags.Filter(diag.Warning)); warnings > 0 {
 		parts = append(parts, color.YellowString(pluralize(warnings, "warning", "warnings")))
 	}
-	if len(parts) > 0 {
-		return fmt.Sprintf("Found %s", strings.Join(parts, " and "))
-	} else {
-		return color.GreenString("Validation OK!")
+	if recommendations := len(diags.Filter(diag.Recommendation)); recommendations > 0 {
+		parts = append(parts, color.BlueString(pluralize(recommendations, "recommendation", "recommendations")))
+	}
+	switch {
+	case len(parts) >= 3:
+		first := strings.Join(parts[:len(parts)-1], ", ")
+		last := parts[len(parts)-1]
+		return fmt.Sprintf("Found %s, and %s\n", first, last)
+	case len(parts) == 2:
+		return fmt.Sprintf("Found %s and %s\n", parts[0], parts[1])
+	case len(parts) == 1:
+		return fmt.Sprintf("Found %s\n", parts[0])
+	default:
+		// No diagnostics to print.
+		return color.GreenString("Validation OK!\n")
 	}
 }

-func renderSummaryTemplate(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error {
+func renderSummaryHeaderTemplate(out io.Writer, b *bundle.Bundle) error {
 	if b == nil {
-		return renderSummaryTemplate(out, &bundle.Bundle{}, diags)
+		return renderSummaryHeaderTemplate(out, &bundle.Bundle{})
 	}

 	var currentUser = &iam.User{}

@@ -114,33 +118,20 @@ func renderSummaryTemplate(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error {
 		}
 	}

-	t := template.Must(template.New("summary").Funcs(renderFuncMap).Parse(summaryTemplate))
+	t := template.Must(template.New("summary").Funcs(renderFuncMap).Parse(summaryHeaderTemplate))
 	err := t.Execute(out, map[string]any{
 		"Name":   b.Config.Bundle.Name,
 		"Target": b.Config.Bundle.Target,
 		"User":   currentUser.UserName,
 		"Path":   b.Config.Workspace.RootPath,
 		"Host":   b.Config.Workspace.Host,
-		"Trailer": buildTrailer(diags),
 	})

 	return err
 }

-func renderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error {
-	errorT := template.Must(template.New("error").Funcs(renderFuncMap).Parse(errorTemplate))
-	warningT := template.Must(template.New("warning").Funcs(renderFuncMap).Parse(warningTemplate))
-
-	// Print errors and warnings.
+func renderDiagnosticsOnly(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error {
 	for _, d := range diags {
-		var t *template.Template
-		switch d.Severity {
-		case diag.Error:
-			t = errorT
-		case diag.Warning:
-			t = warningT
-		}
-
 		for i := range d.Locations {
 			if b == nil {
 				break

@@ -155,15 +146,9 @@ func renderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics)
 			}
 		}
 	}
-
-		// Render the diagnostic with the appropriate template.
-		err := t.Execute(out, d)
-		if err != nil {
-			return fmt.Errorf("failed to render template: %w", err)
-		}
-	}

-	return nil
+	return cmdio.RenderDiagnostics(out, diags)
 }

 // RenderOptions contains options for rendering diagnostics.

@@ -173,19 +158,73 @@ type RenderOptions struct {
 	RenderSummaryTable bool
 }

-// RenderTextOutput renders the diagnostics in a human-readable format.
-func RenderTextOutput(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics, opts RenderOptions) error {
-	err := renderDiagnostics(out, b, diags)
+// RenderDiagnostics renders the diagnostics in a human-readable format.
+func RenderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics, opts RenderOptions) error {
+	err := renderDiagnosticsOnly(out, b, diags)
 	if err != nil {
 		return fmt.Errorf("failed to render diagnostics: %w", err)
 	}

 	if opts.RenderSummaryTable {
-		err = renderSummaryTemplate(out, b, diags)
-		if err != nil {
-			return fmt.Errorf("failed to render summary: %w", err)
-		}
+		if b != nil {
+			err = renderSummaryHeaderTemplate(out, b)
+			if err != nil {
+				return fmt.Errorf("failed to render summary: %w", err)
+			}
+			io.WriteString(out, "\n")
+		}
+		trailer := buildTrailer(diags)
+		io.WriteString(out, trailer)
 	}

 	return nil
 }

+func RenderSummary(ctx context.Context, out io.Writer, b *bundle.Bundle) error {
+	if err := renderSummaryHeaderTemplate(out, b); err != nil {
+		return err
+	}
+
+	var resourceGroups []ResourceGroup
+
+	for _, group := range b.Config.Resources.AllResources() {
+		resources := make([]ResourceInfo, 0, len(group.Resources))
+		for key, resource := range group.Resources {
+			resources = append(resources, ResourceInfo{
+				Key:  key,
+				Name: resource.GetName(),
+				URL:  resource.GetURL(),
+			})
+		}
+
+		if len(resources) > 0 {
+			resourceGroups = append(resourceGroups, ResourceGroup{
+				GroupName: group.Description.PluralTitle,
+				Resources: resources,
+			})
+		}
+	}
+
+	if err := renderResourcesTemplate(out, resourceGroups); err != nil {
+		return fmt.Errorf("failed to render resources template: %w", err)
+	}
+
+	return nil
+}
+
+// Helper function to sort and render resource groups using the template.
+func renderResourcesTemplate(out io.Writer, resourceGroups []ResourceGroup) error {
+	// Sort everything to ensure consistent output.
+	sort.Slice(resourceGroups, func(i, j int) bool {
+		return resourceGroups[i].GroupName < resourceGroups[j].GroupName
+	})
+	for _, group := range resourceGroups {
+		sort.Slice(group.Resources, func(i, j int) bool {
+			return group.Resources[i].Key < group.Resources[j].Key
+		})
+	}
+
+	t := template.Must(template.New("resources").Funcs(renderFuncMap).Parse(resourcesTemplate))
+
+	return t.Execute(out, resourceGroups)
+}
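RenderSummary feeds resourcesTemplate with sorted ResourceGroup values; a minimal sketch of that data shape (group and resource values made up for illustration):

    // Sketch: the data passed to resourcesTemplate after sorting.
    groups := []ResourceGroup{{
        GroupName: "Jobs",
        Resources: []ResourceInfo{
            {Key: "job1", Name: "job1-name", URL: "https://url1"},
        },
    }}
    _ = renderResourcesTemplate(os.Stdout, groups)
    // Resources:
    //   Jobs:
    //     job1:
    //       Name: job1-name
    //       URL:  https://url1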
@@ -2,14 +2,21 @@ package render

 import (
 	"bytes"
+	"context"
+	"io"
 	"testing"

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/dyn"
-	assert "github.com/databricks/cli/libs/dyn/dynassert"
+	"github.com/databricks/databricks-sdk-go/service/catalog"
 	"github.com/databricks/databricks-sdk-go/service/iam"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+	"github.com/databricks/databricks-sdk-go/service/pipelines"
+	"github.com/databricks/databricks-sdk-go/service/serving"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )

@@ -45,6 +52,19 @@ func TestRenderTextOutput(t *testing.T) {
 			"\n" +
 			"Found 1 error\n",
 	},
+	{
+		name: "nil bundle and 1 recommendation",
+		diags: diag.Diagnostics{
+			{
+				Severity: diag.Recommendation,
+				Summary:  "recommendation",
+			},
+		},
+		opts: RenderOptions{RenderSummaryTable: true},
+		expected: "Recommendation: recommendation\n" +
+			"\n" +
+			"Found 1 recommendation\n",
+	},
 	{
 		name:   "bundle during 'load' and 1 error",
 		bundle: loadingBundle,

@@ -84,7 +104,7 @@ func TestRenderTextOutput(t *testing.T) {
 			"Found 2 warnings\n",
 	},
 	{
-		name:   "bundle during 'load' and 2 errors, 1 warning with details",
+		name:   "bundle during 'load' and 2 errors, 1 warning and 1 recommendation with details",
 		bundle: loadingBundle,
 		diags: diag.Diagnostics{
 			diag.Diagnostic{

@@ -105,6 +125,12 @@ func TestRenderTextOutput(t *testing.T) {
 				Detail:    "detail (3)",
 				Locations: []dyn.Location{{File: "foo.py", Line: 3, Column: 1}},
 			},
+			diag.Diagnostic{
+				Severity:  diag.Recommendation,
+				Summary:   "recommendation (4)",
+				Detail:    "detail (4)",
+				Locations: []dyn.Location{{File: "foo.py", Line: 4, Column: 1}},
+			},
 		},
 		opts: RenderOptions{RenderSummaryTable: true},
 		expected: "Error: error (1)\n" +

@@ -122,10 +148,114 @@ func TestRenderTextOutput(t *testing.T) {
 			"\n" +
 			"detail (3)\n" +
 			"\n" +
+			"Recommendation: recommendation (4)\n" +
+			"  in foo.py:4:1\n" +
+			"\n" +
+			"detail (4)\n" +
+			"\n" +
 			"Name: test-bundle\n" +
 			"Target: test-target\n" +
 			"\n" +
-			"Found 2 errors and 1 warning\n",
+			"Found 2 errors, 1 warning, and 1 recommendation\n",
+	},
+	{
+		name:   "bundle during 'load' and 1 error and 1 warning",
+		bundle: loadingBundle,
+		diags: diag.Diagnostics{
+			diag.Diagnostic{
+				Severity:  diag.Error,
+				Summary:   "error (1)",
+				Detail:    "detail (1)",
+				Locations: []dyn.Location{{File: "foo.py", Line: 1, Column: 1}},
+			},
+			diag.Diagnostic{
+				Severity:  diag.Warning,
+				Summary:   "warning (2)",
+				Detail:    "detail (2)",
+				Locations: []dyn.Location{{File: "foo.py", Line: 2, Column: 1}},
+			},
+		},
+		opts: RenderOptions{RenderSummaryTable: true},
+		expected: "Error: error (1)\n" +
+			"  in foo.py:1:1\n" +
+			"\n" +
+			"detail (1)\n" +
+			"\n" +
+			"Warning: warning (2)\n" +
+			"  in foo.py:2:1\n" +
+			"\n" +
+			"detail (2)\n" +
+			"\n" +
+			"Name: test-bundle\n" +
+			"Target: test-target\n" +
+			"\n" +
+			"Found 1 error and 1 warning\n",
+	},
+	{
+		name:   "bundle during 'load' and 1 error, 2 warnings and 2 recommendations with details",
+		bundle: loadingBundle,
+		diags: diag.Diagnostics{
+			diag.Diagnostic{
+				Severity:  diag.Error,
+				Summary:   "error (1)",
+				Detail:    "detail (1)",
+				Locations: []dyn.Location{{File: "foo.py", Line: 1, Column: 1}},
+			},
+			diag.Diagnostic{
+				Severity:  diag.Warning,
+				Summary:   "warning (2)",
+				Detail:    "detail (2)",
+				Locations: []dyn.Location{{File: "foo.py", Line: 2, Column: 1}},
+			},
+			diag.Diagnostic{
+				Severity:  diag.Warning,
+				Summary:   "warning (3)",
+				Detail:    "detail (3)",
+				Locations: []dyn.Location{{File: "foo.py", Line: 3, Column: 1}},
+			},
+			diag.Diagnostic{
+				Severity:  diag.Recommendation,
+				Summary:   "recommendation (4)",
+				Detail:    "detail (4)",
+				Locations: []dyn.Location{{File: "foo.py", Line: 4, Column: 1}},
+			},
+			diag.Diagnostic{
+				Severity:  diag.Recommendation,
+				Summary:   "recommendation (5)",
+				Detail:    "detail (5)",
+				Locations: []dyn.Location{{File: "foo.py", Line: 5, Column: 1}},
+			},
+		},
+		opts: RenderOptions{RenderSummaryTable: true},
+		expected: "Error: error (1)\n" +
+			"  in foo.py:1:1\n" +
+			"\n" +
+			"detail (1)\n" +
+			"\n" +
+			"Warning: warning (2)\n" +
+			"  in foo.py:2:1\n" +
+			"\n" +
+			"detail (2)\n" +
+			"\n" +
+			"Warning: warning (3)\n" +
+			"  in foo.py:3:1\n" +
+			"\n" +
+			"detail (3)\n" +
+			"\n" +
+			"Recommendation: recommendation (4)\n" +
+			"  in foo.py:4:1\n" +
+			"\n" +
+			"detail (4)\n" +
+			"\n" +
+			"Recommendation: recommendation (5)\n" +
+			"  in foo.py:5:1\n" +
+			"\n" +
+			"detail (5)\n" +
+			"\n" +
+			"Name: test-bundle\n" +
+			"Target: test-target\n" +
+			"\n" +
+			"Found 1 error, 2 warnings, and 2 recommendations\n",
 	},
 	{
 		name: "bundle during 'init'",

@@ -158,7 +288,7 @@ func TestRenderTextOutput(t *testing.T) {
 			"Validation OK!\n",
 	},
 	{
-		name:   "nil bundle without summary with 1 error and 1 warning",
+		name:   "nil bundle without summary with 1 error, 1 warning and 1 recommendation",
 		bundle: nil,
 		diags: diag.Diagnostics{
 			diag.Diagnostic{

@@ -173,6 +303,12 @@ func TestRenderTextOutput(t *testing.T) {
 				Detail:    "detail (2)",
 				Locations: []dyn.Location{{File: "foo.py", Line: 3, Column: 1}},
 			},
+			diag.Diagnostic{
+				Severity:  diag.Recommendation,
+				Summary:   "recommendation (3)",
+				Detail:    "detail (3)",
+				Locations: []dyn.Location{{File: "foo.py", Line: 5, Column: 1}},
+			},
 		},
 		opts: RenderOptions{RenderSummaryTable: false},
 		expected: "Error: error (1)\n" +

@@ -184,6 +320,11 @@ func TestRenderTextOutput(t *testing.T) {
 			"  in foo.py:3:1\n" +
 			"\n" +
 			"detail (2)\n" +
+			"\n" +
+			"Recommendation: recommendation (3)\n" +
+			"  in foo.py:5:1\n" +
+			"\n" +
+			"detail (3)\n" +
 			"\n",
 	},
 }

@@ -192,7 +333,7 @@ func TestRenderTextOutput(t *testing.T) {
 	t.Run(tc.name, func(t *testing.T) {
 		writer := &bytes.Buffer{}

-		err := RenderTextOutput(writer, tc.bundle, tc.diags, tc.opts)
+		err := RenderDiagnostics(writer, tc.bundle, tc.diags, tc.opts)
 		require.NoError(t, err)

 		assert.Equal(t, tc.expected, writer.String())

@@ -304,13 +445,37 @@ func TestRenderDiagnostics(t *testing.T) {
 			"\n" +
 			"'name' is required\n\n",
 	},
+	{
+		name: "recommendation with multiple paths and locations",
+		diags: diag.Diagnostics{
+			{
+				Severity: diag.Recommendation,
+				Summary:  "summary",
+				Detail:   "detail",
+				Paths: []dyn.Path{
+					dyn.MustPathFromString("resources.jobs.xxx"),
+					dyn.MustPathFromString("resources.jobs.yyy"),
+				},
+				Locations: []dyn.Location{
+					{File: "foo.yaml", Line: 1, Column: 2},
+					{File: "bar.yaml", Line: 3, Column: 4},
+				},
+			},
+		},
+		expected: "Recommendation: summary\n" +
+			"  at resources.jobs.xxx\n" +
+			"     resources.jobs.yyy\n" +
+			"  in foo.yaml:1:2\n" +
+			"     bar.yaml:3:4\n\n" +
+			"detail\n\n",
+	},
 }

 for _, tc := range testCases {
 	t.Run(tc.name, func(t *testing.T) {
 		writer := &bytes.Buffer{}

-		err := renderDiagnostics(writer, bundle, tc.diags)
+		err := renderDiagnosticsOnly(writer, bundle, tc.diags)
 		require.NoError(t, err)

 		assert.Equal(t, tc.expected, writer.String())

@@ -321,8 +486,105 @@ func TestRenderDiagnostics(t *testing.T) {
 func TestRenderSummaryTemplate_nilBundle(t *testing.T) {
 	writer := &bytes.Buffer{}

-	err := renderSummaryTemplate(writer, nil, nil)
+	err := renderSummaryHeaderTemplate(writer, nil)
 	require.NoError(t, err)

+	io.WriteString(writer, buildTrailer(nil))
+
 	assert.Equal(t, "Validation OK!\n", writer.String())
 }

+func TestRenderSummary(t *testing.T) {
+	ctx := context.Background()
+
+	// Create a mock bundle with various resources.
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Bundle: config.Bundle{
+				Name:   "test-bundle",
+				Target: "test-target",
+			},
+			Workspace: config.Workspace{
+				Host: "https://mycompany.databricks.com/",
+			},
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job1": {
+						ID:          "1",
+						URL:         "https://url1",
+						JobSettings: &jobs.JobSettings{Name: "job1-name"},
+					},
+					"job2": {
+						ID:          "2",
+						URL:         "https://url2",
+						JobSettings: &jobs.JobSettings{Name: "job2-name"},
+					},
+				},
+				Pipelines: map[string]*resources.Pipeline{
+					"pipeline2": {
+						ID: "4",
+						// no URL
+						PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline2-name"},
+					},
+					"pipeline1": {
+						ID:           "3",
+						URL:          "https://url3",
+						PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1-name"},
+					},
+				},
+				Schemas: map[string]*resources.Schema{
+					"schema1": {
+						ID: "catalog.schema",
+						CreateSchema: &catalog.CreateSchema{
+							Name: "schema",
+						},
+						// no URL
+					},
+				},
+				ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{
+					"endpoint1": {
+						ID: "7",
+						CreateServingEndpoint: &serving.CreateServingEndpoint{
+							Name: "my_serving_endpoint",
+						},
+						URL: "https://url4",
+					},
+				},
+			},
+		},
+	}

+	writer := &bytes.Buffer{}
+	err := RenderSummary(ctx, writer, b)
+	require.NoError(t, err)

+	expectedSummary := `Name: test-bundle
+Target: test-target
+Workspace:
+  Host: https://mycompany.databricks.com/
+Resources:
+  Jobs:
+    job1:
+      Name: job1-name
+      URL:  https://url1
+    job2:
+      Name: job2-name
+      URL:  https://url2
+  Model Serving Endpoints:
+    endpoint1:
+      Name: my_serving_endpoint
+      URL:  https://url4
+  Pipelines:
+    pipeline1:
+      Name: pipeline1-name
+      URL:  https://url3
+    pipeline2:
+      Name: pipeline2-name
+      URL:  (not deployed)
+  Schemas:
+    schema1:
+      Name: schema
+      URL:  (not deployed)
+`
+	assert.Equal(t, expectedSummary, writer.String())
+}
@@ -39,7 +39,7 @@ func TestJsonSchema(t *testing.T) {

 	// Assert job fields have their descriptions loaded.
 	resourceJob := walk(s.Definitions, "github.com", "databricks", "cli", "bundle", "config", "resources.Job")
-	fields := []string{"name", "continuous", "deployment", "tasks", "trigger"}
+	fields := []string{"name", "continuous", "tasks", "trigger"}
 	for _, field := range fields {
 		assert.NotEmpty(t, resourceJob.AnyOf[0].Properties[field].Description)
 	}

@@ -53,7 +53,7 @@ func TestJsonSchema(t *testing.T) {

 	// Assert descriptions are loaded for pipelines.
 	pipeline := walk(s.Definitions, "github.com", "databricks", "cli", "bundle", "config", "resources.Pipeline")
-	fields = []string{"name", "catalog", "clusters", "channel", "continuous", "deployment", "development"}
+	fields = []string{"name", "catalog", "clusters", "channel", "continuous", "development"}
 	for _, field := range fields {
 		assert.NotEmpty(t, pipeline.AnyOf[0].Properties[field].Description)
 	}
@@ -59,6 +59,127 @@
       "cli": {
         "bundle": {
           "config": {
+            "resources.Cluster": {
+              "anyOf": [
+                {
+                  "type": "object",
+                  "properties": {
+                    "apply_policy_default_values": {
+                      "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied.",
+                      "$ref": "#/$defs/bool"
+                    },
+                    "autoscale": {
+                      "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
+                      "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.AutoScale"
+                    },
+                    "autotermination_minutes": {
+                      "description": "Automatically terminates the cluster after it is inactive for this time in minutes. If not set,\nthis cluster will not be automatically terminated. If specified, the threshold must be between\n10 and 10000 minutes.\nUsers can also set this value to 0 to explicitly disable automatic termination.",
+                      "$ref": "#/$defs/int"
+                    },
+                    "aws_attributes": {
+                      "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.",
+                      "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.AwsAttributes"
+                    },
+                    "azure_attributes": {
+                      "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.",
+                      "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.AzureAttributes"
+                    },
+                    "cluster_log_conf": {
+                      "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.",
+                      "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.ClusterLogConf"
+                    },
+                    "cluster_name": {
+                      "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n",
+                      "$ref": "#/$defs/string"
+                    },
+                    "custom_tags": {
+                      "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags",
+                      "$ref": "#/$defs/map/string"
+                    },
+                    "data_security_mode": {
+                      "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.DataSecurityMode"
+                    },
+                    "docker_image": {
+                      "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.DockerImage"
+                    },
+                    "driver_instance_pool_id": {
+                      "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned.",
+                      "$ref": "#/$defs/string"
+                    },
+                    "driver_node_type_id": {
+                      "description": "The node type of the Spark driver. Note that this field is optional;\nif unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n",
+                      "$ref": "#/$defs/string"
+                    },
+                    "enable_elastic_disk": {
+                      "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk\nspace when its Spark workers are running low on disk space. This feature requires specific AWS\npermissions to function correctly - refer to the User Guide for more details.",
+                      "$ref": "#/$defs/bool"
+                    },
+                    "enable_local_disk_encryption": {
+                      "description": "Whether to enable LUKS on cluster VMs' local disks",
+                      "$ref": "#/$defs/bool"
+                    },
+                    "gcp_attributes": {
+                      "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.",
+                      "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.GcpAttributes"
+                    },
+                    "init_scripts": {
+                      "description": "The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.",
+                      "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/compute.InitScriptInfo"
+                    },
+                    "instance_pool_id": {
+                      "description": "The optional ID of the instance pool to which the cluster belongs.",
+                      "$ref": "#/$defs/string"
+                    },
+                    "node_type_id": {
+                      "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n",
+                      "$ref": "#/$defs/string"
+                    },
+                    "num_workers": {
+                      "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned.",
+                      "$ref": "#/$defs/int"
+                    },
+                    "permissions": {
+                      "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission"
+                    },
+                    "policy_id": {
+                      "description": "The ID of the cluster policy used to create the cluster if applicable.",
+                      "$ref": "#/$defs/string"
+                    },
+                    "runtime_engine": {
+                      "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.RuntimeEngine"
+                    },
+                    "single_user_name": {
+                      "description": "Single user name if data_security_mode is `SINGLE_USER`",
+                      "$ref": "#/$defs/string"
+                    },
+                    "spark_conf": {
+                      "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nUsers can also pass in a string of extra JVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively.\n",
+                      "$ref": "#/$defs/map/string"
+                    },
+                    "spark_env_vars": {
+                      "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`",
+                      "$ref": "#/$defs/map/string"
+                    },
+                    "spark_version": {
+                      "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`.\nA list of available Spark versions can be retrieved by using\nthe :method:clusters/sparkVersions API call.\n",
+                      "$ref": "#/$defs/string"
+                    },
+                    "ssh_public_keys": {
+                      "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.",
+                      "$ref": "#/$defs/slice/string"
+                    },
+                    "workload_type": {
+                      "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.WorkloadType"
+                    }
+                  },
+                  "additionalProperties": false
+                },
+                {
+                  "type": "string",
+                  "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
+                }
+              ]
+            },
             "resources.Grant": {
               "anyOf": [
                 {
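Each `anyOf` above pairs the typed object schema with a string alternative so a field may instead hold a `${var.…}` interpolation reference. A minimal sketch of what that pattern accepts, using Go's regexp package (the sample inputs are illustrative, not taken from the schema itself):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The same interpolation pattern used by the string alternative in the anyOf schemas above.
	pattern := regexp.MustCompile(`\$\{(var(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\[[0-9]+\])*)+)\}`)

	for _, s := range []string{
		"${var.cluster_id}",  // simple variable reference: matches
		"${var.clusters[0]}", // indexed reference: matches
		"${var.a.b-c}",       // nested path with hyphenated segment: matches
		"${workspace.host}",  // not a var reference: no match
	} {
		fmt.Printf("%-22q -> %v\n", s, pattern.MatchString(s))
	}
}
```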
@@ -92,30 +213,18 @@
               "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.",
               "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.Continuous"
             },
-            "deployment": {
-              "description": "Deployment information for jobs managed by external sources.",
-              "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobDeployment"
-            },
             "description": {
               "description": "An optional description for the job. The maximum length is 27700 characters in UTF-8 encoding.",
               "$ref": "#/$defs/string"
             },
-            "edit_mode": {
-              "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified.",
-              "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobEditMode"
-            },
             "email_notifications": {
               "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted.",
               "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobEmailNotifications"
             },
             "environments": {
-              "description": "A list of task execution environment specifications that can be referenced by tasks of this job.",
+              "description": "A list of task execution environment specifications that can be referenced by serverless tasks of this job.\nAn environment is required to be present for serverless tasks.\nFor serverless notebook tasks, the environment is accessible in the notebook environment panel.\nFor other serverless tasks, the task environment is required to be specified using environment_key in the task settings.",
               "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/jobs.JobEnvironment"
             },
-            "format": {
-              "description": "Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `\"MULTI_TASK\"`.",
-              "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.Format"
-            },
             "git_source": {
               "description": "An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job.",
               "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.GitSource"
@@ -281,6 +390,10 @@
             {
               "type": "object",
               "properties": {
+                "ai_gateway": {
+                  "description": "The AI Gateway configuration for the serving endpoint. NOTE: only external model endpoints are supported as of now.",
+                  "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayConfig"
+                },
                 "config": {
                   "description": "The core config of the serving endpoint.",
                   "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.EndpointCoreConfigInput"
@@ -293,7 +406,7 @@
                   "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission"
                 },
                 "rate_limits": {
-                  "description": "Rate limits to be applied to the serving endpoint. NOTE: only external and foundation model endpoints are supported as of now.",
+                  "description": "Rate limits to be applied to the serving endpoint. NOTE: this field is deprecated, please use AI Gateway to manage rate limits.",
                   "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/serving.RateLimit"
                 },
                 "route_optimized": {
@@ -351,6 +464,10 @@
             {
               "type": "object",
               "properties": {
+                "budget_policy_id": {
+                  "description": "Budget policy of this pipeline.",
+                  "$ref": "#/$defs/string"
+                },
                 "catalog": {
                   "description": "A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, `catalog`.`target`.`table`). If `target` is not specified, no data is published to Unity Catalog.",
                   "$ref": "#/$defs/string"
@@ -418,6 +535,10 @@
                   "description": "Whether Photon is enabled for this pipeline.",
                   "$ref": "#/$defs/bool"
                 },
+                "schema": {
+                  "description": "The default schema (database) where tables are read from or published to. The presence of this field implies that the pipeline is in direct publishing mode.",
+                  "$ref": "#/$defs/string"
+                },
                 "serverless": {
                   "description": "Whether serverless compute is enabled for this pipeline.",
                   "$ref": "#/$defs/bool"
@@ -747,6 +868,9 @@
         {
           "type": "object",
           "properties": {
+            "cluster_id": {
+              "$ref": "#/$defs/string"
+            },
            "compute_id": {
              "$ref": "#/$defs/string"
            },
@@ -923,6 +1047,9 @@
         {
          "type": "object",
          "properties": {
+            "clusters": {
+              "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Cluster"
+            },
            "experiments": {
              "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowExperiment"
            },
@@ -990,6 +1117,9 @@
            "bundle": {
              "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Bundle"
            },
+            "cluster_id": {
+              "$ref": "#/$defs/string"
+            },
            "compute_id": {
              "$ref": "#/$defs/string"
            },
@@ -1076,6 +1206,9 @@
            "profile": {
              "$ref": "#/$defs/string"
            },
+            "resource_path": {
+              "$ref": "#/$defs/string"
+            },
            "root_path": {
              "$ref": "#/$defs/string"
            },
@@ -2028,7 +2161,7 @@
        },
        "compute.RuntimeEngine": {
          "type": "string",
-          "description": "Decides which runtime engine to be use, e.g. Standard vs. Photon. If unspecified, the runtime\nengine is inferred from spark_version.",
+          "description": "Determines the cluster's runtime engine, either standard or Photon.\n\nThis field is not compatible with legacy `spark_version` values that contain `-photon-`.\nRemove `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`.\n\nIf left unspecified, the runtime engine defaults to standard unless the spark_version\ncontains -photon-, in which case Photon will be used.\n",
          "enum": [
            "NULL",
            "STANDARD",
@@ -2402,9 +2535,6 @@
            "description": "Unique identifier of the service used to host the Git repository. The value is case insensitive.",
            "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.GitProvider"
          },
-          "git_snapshot": {
-            "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.GitSnapshot"
-          },
          "git_tag": {
            "description": "Name of the tag to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_commit.",
            "$ref": "#/$defs/string"
@@ -2412,10 +2542,6 @@
          "git_url": {
            "description": "URL of the repository to be cloned by this job.",
            "$ref": "#/$defs/string"
-          },
-          "job_source": {
-            "description": "The source of the job specification in the remote repository when the job is source controlled.",
-            "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobSource"
          }
        },
        "additionalProperties": false,
@@ -2502,7 +2628,7 @@
        "type": "object",
        "properties": {
          "no_alert_for_skipped_runs": {
-            "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped.",
+            "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped.\nThis field is `deprecated`. Please use the `notification_settings.no_alert_for_skipped_runs` field.",
            "$ref": "#/$defs/bool"
          },
          "on_duration_warning_threshold_exceeded": {
@@ -2610,7 +2736,7 @@
      "anyOf": [
        {
          "type": "object",
-          "description": "Write-only setting, available only in Create/Update/Reset and Submit calls. Specifies the user or service principal that the job runs as. If not specified, the job runs as the user who created the job.\n\nOnly `user_name` or `service_principal_name` can be specified. If both are specified, an error is thrown.",
+          "description": "Write-only setting. Specifies the user, service principal or group that the job/pipeline runs as. If not specified, the job/pipeline runs as the user who created the job/pipeline.\n\nExactly one of `user_name`, `service_principal_name`, `group_name` should be specified. If not, an error is thrown.",
          "properties": {
            "service_principal_name": {
              "description": "Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role.",
@@ -2943,6 +3069,7 @@
            "$ref": "#/$defs/map/string"
          },
          "pipeline_params": {
+            "description": "Controls whether the pipeline should perform a full refresh",
            "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.PipelineParams"
          },
          "python_named_params": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"no_alert_for_skipped_runs": {
|
"no_alert_for_skipped_runs": {
|
||||||
"description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped.",
|
"description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped.\nThis field is `deprecated`. Please use the `notification_settings.no_alert_for_skipped_runs` field.",
|
||||||
"$ref": "#/$defs/bool"
|
"$ref": "#/$defs/bool"
|
||||||
},
|
},
|
||||||
"on_duration_warning_threshold_exceeded": {
|
"on_duration_warning_threshold_exceeded": {
|
||||||
|
@ -4235,6 +4362,207 @@
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"serving.AiGatewayConfig": {
|
||||||
|
"anyOf": [
|
||||||
|
{
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"guardrails": {
|
||||||
|
"description": "Configuration for AI Guardrails to prevent unwanted data and unsafe data in requests and responses.",
|
||||||
|
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrails"
|
||||||
|
},
|
||||||
|
"inference_table_config": {
|
||||||
|
"description": "Configuration for payload logging using inference tables. Use these tables to monitor and audit data being sent to and received from model APIs and to improve model quality.",
|
||||||
|
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayInferenceTableConfig"
|
||||||
|
},
|
||||||
|
"rate_limits": {
|
||||||
|
"description": "Configuration for rate limits which can be set to limit endpoint traffic.",
|
||||||
|
"$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimit"
|
||||||
|
},
|
||||||
|
"usage_tracking_config": {
|
||||||
|
"description": "Configuration to enable usage tracking using system tables. These tables allow you to monitor operational usage on endpoints and their associated costs.",
|
||||||
|
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayUsageTrackingConfig"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"additionalProperties": false
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "string",
|
||||||
|
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"serving.AiGatewayGuardrailParameters": {
|
||||||
|
"anyOf": [
|
||||||
|
{
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"invalid_keywords": {
|
||||||
|
"description": "List of invalid keywords. AI guardrail uses keyword or string matching to decide if the keyword exists in the request or response content.",
|
||||||
|
"$ref": "#/$defs/slice/string"
|
||||||
|
},
|
||||||
|
"pii": {
|
||||||
|
"description": "Configuration for guardrail PII filter.",
|
||||||
|
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehavior"
|
||||||
|
},
|
||||||
|
"safety": {
|
||||||
|
"description": "Indicates whether the safety filter is enabled.",
|
||||||
|
"$ref": "#/$defs/bool"
|
||||||
|
},
|
||||||
|
"valid_topics": {
|
||||||
|
"description": "The list of allowed topics. Given a chat request, this guardrail flags the request if its topic is not in the allowed topics.",
|
||||||
|
"$ref": "#/$defs/slice/string"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"additionalProperties": false
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "string",
|
||||||
|
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"serving.AiGatewayGuardrailPiiBehavior": {
|
||||||
|
"anyOf": [
|
||||||
|
{
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"behavior": {
|
||||||
|
"description": "Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.",
|
||||||
|
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior",
|
||||||
|
"enum": [
|
||||||
|
"NONE",
|
||||||
|
"BLOCK"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"additionalProperties": false,
|
||||||
|
"required": [
|
||||||
|
"behavior"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "string",
|
||||||
|
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"serving.AiGatewayGuardrailPiiBehaviorBehavior": {
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"serving.AiGatewayGuardrails": {
|
||||||
|
"anyOf": [
|
||||||
|
{
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"input": {
|
||||||
|
"description": "Configuration for input guardrail filters.",
|
||||||
|
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailParameters"
|
||||||
|
},
|
||||||
|
"output": {
|
||||||
|
"description": "Configuration for output guardrail filters.",
|
||||||
|
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailParameters"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"additionalProperties": false
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "string",
|
||||||
|
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"serving.AiGatewayInferenceTableConfig": {
|
||||||
|
"anyOf": [
|
||||||
|
{
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"catalog_name": {
|
||||||
|
"description": "The name of the catalog in Unity Catalog. Required when enabling inference tables. NOTE: On update, you have to disable inference table first in order to change the catalog name.",
|
||||||
|
"$ref": "#/$defs/string"
|
||||||
|
},
|
||||||
|
"enabled": {
|
||||||
|
"description": "Indicates whether the inference table is enabled.",
|
||||||
|
"$ref": "#/$defs/bool"
|
||||||
|
},
|
||||||
|
"schema_name": {
|
||||||
|
"description": "The name of the schema in Unity Catalog. Required when enabling inference tables. NOTE: On update, you have to disable inference table first in order to change the schema name.",
|
||||||
|
"$ref": "#/$defs/string"
|
||||||
|
},
|
||||||
|
"table_name_prefix": {
|
||||||
|
"description": "The prefix of the table in Unity Catalog. NOTE: On update, you have to disable inference table first in order to change the prefix name.",
|
||||||
|
"$ref": "#/$defs/string"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"additionalProperties": false
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "string",
|
||||||
|
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"serving.AiGatewayRateLimit": {
|
||||||
|
"anyOf": [
|
||||||
|
{
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"calls": {
|
||||||
|
"description": "Used to specify how many calls are allowed for a key within the renewal_period.",
|
||||||
|
"$ref": "#/$defs/int"
|
||||||
|
},
|
||||||
|
"key": {
|
||||||
|
"description": "Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.",
|
||||||
|
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey",
|
||||||
|
"enum": [
|
||||||
|
"user",
|
||||||
|
"endpoint"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"renewal_period": {
|
||||||
|
"description": "Renewal period field for a rate limit. Currently, only 'minute' is supported.",
|
||||||
|
"$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod",
|
||||||
|
"enum": [
|
||||||
|
"minute"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"additionalProperties": false,
|
||||||
|
"required": [
|
||||||
|
"calls",
|
||||||
|
"renewal_period"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "string",
|
||||||
|
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"serving.AiGatewayRateLimitKey": {
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"serving.AiGatewayRateLimitRenewalPeriod": {
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"serving.AiGatewayUsageTrackingConfig": {
|
||||||
|
"anyOf": [
|
||||||
|
{
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"enabled": {
|
||||||
|
"description": "Whether to enable usage tracking.",
|
||||||
|
"$ref": "#/$defs/bool"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"additionalProperties": false
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "string",
|
||||||
|
"pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
"serving.AmazonBedrockConfig": {
|
"serving.AmazonBedrockConfig": {
|
||||||
"anyOf": [
|
"anyOf": [
|
||||||
{
|
{
|
||||||
|
@@ -4904,6 +5232,20 @@
      "cli": {
        "bundle": {
          "config": {
+            "resources.Cluster": {
+              "anyOf": [
+                {
+                  "type": "object",
+                  "additionalProperties": {
+                    "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Cluster"
+                  }
+                },
+                {
+                  "type": "string",
+                  "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
+                }
+              ]
+            },
            "resources.Job": {
              "anyOf": [
                {
@@ -5425,6 +5767,20 @@
            }
          ]
        },
+        "serving.AiGatewayRateLimit": {
+          "anyOf": [
+            {
+              "type": "array",
+              "items": {
+                "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimit"
+              }
+            },
+            {
+              "type": "string",
+              "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
+            }
+          ]
+        },
        "serving.EndpointTag": {
          "anyOf": [
            {
@@ -0,0 +1,33 @@
+bundle:
+  name: issue_1828
+
+variables:
+  # One entry for each of the underlying YAML (or [dyn.Kind]) types.
+  # The test confirms we can convert to and from the typed configuration without losing information.
+
+  map:
+    default:
+      foo: bar
+
+  sequence:
+    default:
+      - foo
+      - bar
+
+  string:
+    default: foo
+
+  bool:
+    default: true
+
+  int:
+    default: 42
+
+  float:
+    default: 3.14
+
+  time:
+    default: 2021-01-01
+
+  nil:
+    default:
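The test file that follows asserts on the Go values these YAML defaults decode to. A small standalone sketch of that scalar round trip, using gopkg.in/yaml.v3 purely for illustration (the bundle loader itself uses its own dyn-based machinery):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := []byte(`
map:
  foo: bar
sequence: [foo, bar]
bool: true
int: 42
float: 3.14
nil:
`)
	var v map[string]any
	if err := yaml.Unmarshal(src, &v); err != nil {
		panic(err)
	}
	// Each YAML kind surfaces as the Go value the test below expects.
	fmt.Printf("map:      %#v\n", v["map"])      // map[string]interface {}{"foo":"bar"}
	fmt.Printf("sequence: %#v\n", v["sequence"]) // []interface {}{"foo", "bar"}
	fmt.Printf("bool:     %#v\n", v["bool"])     // true
	fmt.Printf("int:      %#v\n", v["int"])      // 42
	fmt.Printf("float:    %#v\n", v["float"])    // 3.14
	fmt.Printf("nil:      %#v\n", v["nil"])      // <nil>
}
```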
@@ -0,0 +1,48 @@
+package config_tests
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestIssue1828(t *testing.T) {
+	b := load(t, "./issue_1828")
+
+	if assert.Contains(t, b.Config.Variables, "map") {
+		assert.Equal(t, map[string]any{
+			"foo": "bar",
+		}, b.Config.Variables["map"].Default)
+	}
+
+	if assert.Contains(t, b.Config.Variables, "sequence") {
+		assert.Equal(t, []any{
+			"foo",
+			"bar",
+		}, b.Config.Variables["sequence"].Default)
+	}
+
+	if assert.Contains(t, b.Config.Variables, "string") {
+		assert.Equal(t, "foo", b.Config.Variables["string"].Default)
+	}
+
+	if assert.Contains(t, b.Config.Variables, "bool") {
+		assert.Equal(t, true, b.Config.Variables["bool"].Default)
+	}
+
+	if assert.Contains(t, b.Config.Variables, "int") {
+		assert.Equal(t, 42, b.Config.Variables["int"].Default)
+	}
+
+	if assert.Contains(t, b.Config.Variables, "float") {
+		assert.Equal(t, 3.14, b.Config.Variables["float"].Default)
+	}
+
+	if assert.Contains(t, b.Config.Variables, "time") {
+		assert.Equal(t, "2021-01-01", b.Config.Variables["time"].Default)
+	}
+
+	if assert.Contains(t, b.Config.Variables, "nil") {
+		assert.Equal(t, nil, b.Config.Variables["nil"].Default)
+	}
+}
@@ -11,7 +11,7 @@ func TestExpandPipelineGlobPaths(t *testing.T) {
 	require.NoError(t, diags.Error())
 	require.Equal(
 		t,
-		"/Users/user@domain.com/.bundle/pipeline_glob_paths/default/files/dlt/nyc_taxi_loader",
+		"/Workspace/Users/user@domain.com/.bundle/pipeline_glob_paths/default/files/dlt/nyc_taxi_loader",
 		b.Config.Resources.Pipelines["nyc_taxi_pipeline"].Libraries[0].Notebook.Path,
 	)
 }
@@ -12,9 +12,9 @@ func TestRelativePathTranslationDefault(t *testing.T) {
 	require.NoError(t, diags.Error())
 
 	t0 := b.Config.Resources.Jobs["job"].Tasks[0]
-	assert.Equal(t, "/remote/src/file1.py", t0.SparkPythonTask.PythonFile)
+	assert.Equal(t, "/Workspace/remote/src/file1.py", t0.SparkPythonTask.PythonFile)
 	t1 := b.Config.Resources.Jobs["job"].Tasks[1]
-	assert.Equal(t, "/remote/src/file1.py", t1.SparkPythonTask.PythonFile)
+	assert.Equal(t, "/Workspace/remote/src/file1.py", t1.SparkPythonTask.PythonFile)
 }
 
 func TestRelativePathTranslationOverride(t *testing.T) {
@@ -22,7 +22,7 @@ func TestRelativePathTranslationOverride(t *testing.T) {
 	require.NoError(t, diags.Error())
 
 	t0 := b.Config.Resources.Jobs["job"].Tasks[0]
-	assert.Equal(t, "/remote/src/file2.py", t0.SparkPythonTask.PythonFile)
+	assert.Equal(t, "/Workspace/remote/src/file2.py", t0.SparkPythonTask.PythonFile)
 	t1 := b.Config.Resources.Jobs["job"].Tasks[1]
-	assert.Equal(t, "/remote/src/file2.py", t1.SparkPythonTask.PythonFile)
+	assert.Equal(t, "/Workspace/remote/src/file2.py", t1.SparkPythonTask.PythonFile)
 }
@@ -3,7 +3,6 @@ package config_tests
 import (
 	"context"
 	"fmt"
-	"path/filepath"
 	"testing"
 
 	"github.com/databricks/cli/bundle"
@@ -113,8 +112,9 @@ func TestRunAsErrorForPipelines(t *testing.T) {
 	diags := bundle.Apply(ctx, b, mutator.SetRunAs())
 	err := diags.Error()
 
-	configPath := filepath.FromSlash("run_as/not_allowed/pipelines/databricks.yml")
-	assert.EqualError(t, err, fmt.Sprintf("pipelines are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Please refer to the documentation at https://docs.databricks.com/dev-tools/bundles/run-as.html for more details. Location of the unsupported resource: %s:14:5. Current identity: jane@doe.com. Run as identity: my_service_principal", configPath))
+	assert.ErrorContains(t, err, "pipelines do not support a setting a run_as user that is different from the owner.\n"+
+		"Current identity: jane@doe.com. Run as identity: my_service_principal.\n"+
+		"See https://docs")
 }
 
 func TestRunAsNoErrorForPipelines(t *testing.T) {
@@ -152,8 +152,9 @@ func TestRunAsErrorForModelServing(t *testing.T) {
 	diags := bundle.Apply(ctx, b, mutator.SetRunAs())
 	err := diags.Error()
 
-	configPath := filepath.FromSlash("run_as/not_allowed/model_serving/databricks.yml")
-	assert.EqualError(t, err, fmt.Sprintf("model_serving_endpoints are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Please refer to the documentation at https://docs.databricks.com/dev-tools/bundles/run-as.html for more details. Location of the unsupported resource: %s:14:5. Current identity: jane@doe.com. Run as identity: my_service_principal", configPath))
+	assert.ErrorContains(t, err, "model_serving_endpoints do not support a setting a run_as user that is different from the owner.\n"+
+		"Current identity: jane@doe.com. Run as identity: my_service_principal.\n"+
+		"See https://docs")
 }
 
 func TestRunAsNoErrorForModelServingEndpoints(t *testing.T) {
@@ -191,8 +192,7 @@ func TestRunAsErrorWhenBothUserAndSpSpecified(t *testing.T) {
 	diags := bundle.Apply(ctx, b, mutator.SetRunAs())
 	err := diags.Error()
 
-	configPath := filepath.FromSlash("run_as/not_allowed/both_sp_and_user/databricks.yml")
-	assert.EqualError(t, err, fmt.Sprintf("run_as section must specify exactly one identity. A service_principal_name \"my_service_principal\" is specified at %s:6:27. A user_name \"my_user_name\" is defined at %s:7:14", configPath, configPath))
+	assert.ErrorContains(t, err, "run_as section cannot specify both user_name and service_principal_name")
 }
 
 func TestRunAsErrorNeitherUserOrSpSpecified(t *testing.T) {
@@ -202,19 +202,19 @@ func TestRunAsErrorNeitherUserOrSpSpecified(t *testing.T) {
 	}{
 		{
 			name: "empty_run_as",
-			err:  fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:4:8", filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/empty_run_as/databricks.yml")),
+			err:  "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified",
 		},
 		{
 			name: "empty_sp",
-			err:  fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:5:3", filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/empty_sp/databricks.yml")),
+			err:  "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified",
 		},
 		{
 			name: "empty_user",
-			err:  fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:5:3", filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/empty_user/databricks.yml")),
+			err:  "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified",
 		},
 		{
 			name: "empty_user_and_sp",
-			err:  fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:5:3", filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/empty_user_and_sp/databricks.yml")),
+			err:  "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified",
 		},
 	}
 
|
||||||
diags := bundle.Apply(ctx, b, mutator.SetRunAs())
|
diags := bundle.Apply(ctx, b, mutator.SetRunAs())
|
||||||
err := diags.Error()
|
err := diags.Error()
|
||||||
|
|
||||||
configPath := filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/override/override.yml")
|
assert.EqualError(t, err, "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified")
|
||||||
assert.EqualError(t, err, fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:4:12", configPath))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestLegacyRunAs(t *testing.T) {
|
func TestLegacyRunAs(t *testing.T) {
|
||||||
|
|
|
@@ -205,9 +205,15 @@ func newUpdateRuleSet() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = updateRuleSetJson.Unmarshal(&updateRuleSetReq)
-			if err != nil {
-				return err
+			diags := updateRuleSetJson.Unmarshal(&updateRuleSetReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		} else {
 			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
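This same mechanical change repeats across the generated command files below: `Unmarshal` now returns a diagnostics value rather than a plain `error`, so hard failures abort the command while non-fatal warnings are rendered to stderr and execution continues. A minimal self-contained sketch of that control flow (the `Diagnostics` type here is an illustrative stand-in, not the actual definition from the CLI's `libs/diag` package):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// Diagnostic and Diagnostics stand in for the CLI's diag package types.
type Diagnostic struct {
	Severity string // "error" or "warning"
	Summary  string
}

type Diagnostics []Diagnostic

// HasError reports whether any diagnostic is fatal.
func (ds Diagnostics) HasError() bool {
	for _, d := range ds {
		if d.Severity == "error" {
			return true
		}
	}
	return false
}

// Error returns the first fatal diagnostic as an error, or nil.
func (ds Diagnostics) Error() error {
	for _, d := range ds {
		if d.Severity == "error" {
			return errors.New(d.Summary)
		}
	}
	return nil
}

func main() {
	diags := Diagnostics{{Severity: "warning", Summary: "unknown field: foo"}}

	// The shape shared by every generated command in this diff.
	if diags.HasError() {
		fmt.Println("abort:", diags.Error())
		return
	}
	if len(diags) > 0 {
		// In the CLI this is cmdio.RenderDiagnosticsToErrorOut(ctx, diags).
		for _, d := range diags {
			fmt.Fprintf(os.Stderr, "%s: %s\n", d.Severity, d.Summary)
		}
	}
	fmt.Println("request proceeds with the parsed input")
}
```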
@@ -78,9 +78,15 @@ func newCreate() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = createJson.Unmarshal(&createReq)
-			if err != nil {
-				return err
+			diags := createJson.Unmarshal(&createReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		} else {
 			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
@@ -316,9 +322,15 @@ func newUpdate() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = updateJson.Unmarshal(&updateReq)
-			if err != nil {
-				return err
+			diags := updateJson.Unmarshal(&updateReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		} else {
 			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
@@ -90,9 +90,15 @@ func newCreate() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = createJson.Unmarshal(&createReq)
-			if err != nil {
-				return err
+			diags := createJson.Unmarshal(&createReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		} else {
 			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
@@ -129,9 +129,15 @@ func newUpdate() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = updateJson.Unmarshal(&updateReq)
-			if err != nil {
-				return err
+			diags := updateJson.Unmarshal(&updateReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		} else {
 			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
@@ -88,9 +88,15 @@ func newCreate() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = createJson.Unmarshal(&createReq)
-			if err != nil {
-				return err
+			diags := createJson.Unmarshal(&createReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		}
 
@@ -320,9 +326,15 @@ func newUpdate() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = updateJson.Unmarshal(&updateReq)
-			if err != nil {
-				return err
+			diags := updateJson.Unmarshal(&updateReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		}
 		updateReq.IntegrationId = args[0]
@@ -0,0 +1,221 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package disable_legacy_features
+
+import (
+	"fmt"
+
+	"github.com/databricks/cli/cmd/root"
+	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/flags"
+	"github.com/databricks/databricks-sdk-go/service/settings"
+	"github.com/spf13/cobra"
+)
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var cmdOverrides []func(*cobra.Command)
+
+func New() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "disable-legacy-features",
+		Short: `Disable legacy features for new Databricks workspaces.`,
+		Long: `Disable legacy features for new Databricks workspaces.
+
+  For newly created workspaces: 1. Disables the use of DBFS root and mounts. 2.
+  Hive Metastore will not be provisioned. 3. Disables the use of ‘No-isolation
+  clusters’. 4. Disables Databricks Runtime versions prior to 13.3LTS.`,
+
+		// This service is being previewed; hide from help output.
+		Hidden: true,
+	}
+
+	// Add methods
+	cmd.AddCommand(newDelete())
+	cmd.AddCommand(newGet())
+	cmd.AddCommand(newUpdate())
+
+	// Apply optional overrides to this command.
+	for _, fn := range cmdOverrides {
+		fn(cmd)
+	}
+
+	return cmd
+}
+
+// start delete command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var deleteOverrides []func(
+	*cobra.Command,
+	*settings.DeleteDisableLegacyFeaturesRequest,
+)
+
+func newDelete() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var deleteReq settings.DeleteDisableLegacyFeaturesRequest
+
+	// TODO: short flags
+
+	cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`)
+
+	cmd.Use = "delete"
+	cmd.Short = `Delete the disable legacy features setting.`
+	cmd.Long = `Delete the disable legacy features setting.
+
+  Deletes the disable legacy features setting.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(0)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustAccountClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		a := root.AccountClient(ctx)
+
+		response, err := a.Settings.DisableLegacyFeatures().Delete(ctx, deleteReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range deleteOverrides {
+		fn(cmd, &deleteReq)
+	}
+
+	return cmd
+}
+
+// start get command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var getOverrides []func(
+	*cobra.Command,
+	*settings.GetDisableLegacyFeaturesRequest,
+)
+
+func newGet() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var getReq settings.GetDisableLegacyFeaturesRequest
+
+	// TODO: short flags
+
+	cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`)
+
+	cmd.Use = "get"
+	cmd.Short = `Get the disable legacy features setting.`
+	cmd.Long = `Get the disable legacy features setting.
+
+  Gets the value of the disable legacy features setting.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(0)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustAccountClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		a := root.AccountClient(ctx)
+
+		response, err := a.Settings.DisableLegacyFeatures().Get(ctx, getReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range getOverrides {
+		fn(cmd, &getReq)
+	}
+
+	return cmd
+}
+
+// start update command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var updateOverrides []func(
+	*cobra.Command,
+	*settings.UpdateDisableLegacyFeaturesRequest,
+)
+
+func newUpdate() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var updateReq settings.UpdateDisableLegacyFeaturesRequest
+	var updateJson flags.JsonFlag
+
+	// TODO: short flags
+	cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+	cmd.Use = "update"
+	cmd.Short = `Update the disable legacy features setting.`
+	cmd.Long = `Update the disable legacy features setting.
+
+  Updates the value of the disable legacy features setting.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.PreRunE = root.MustAccountClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		a := root.AccountClient(ctx)
+
+		if cmd.Flags().Changed("json") {
+			diags := updateJson.Unmarshal(&updateReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+		}
+
+		response, err := a.Settings.DisableLegacyFeatures().Update(ctx, updateReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range updateOverrides {
+		fn(cmd, &updateReq)
+	}
+
+	return cmd
+}
+
+// end service DisableLegacyFeatures
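The `cmdOverrides`/`deleteOverrides`/`getOverrides`/`updateOverrides` slices above are the generator's extension point: hand-written files in the same package append to them from `init()`, as the file's own comments describe. A hypothetical `override.go` sketch, purely illustrative (the file name, field choices, and wording are assumptions, not part of this commit):

```go
package disable_legacy_features

import (
	"github.com/databricks/databricks-sdk-go/service/settings"
	"github.com/spf13/cobra"
)

func init() {
	// Runs against the root "disable-legacy-features" command after generation.
	cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
		cmd.Hidden = false // e.g. surface the command once the preview ends
	})

	// Runs against the generated "update" subcommand and its request struct.
	updateOverrides = append(updateOverrides, func(cmd *cobra.Command, req *settings.UpdateDisableLegacyFeaturesRequest) {
		cmd.Short = `Update the disable legacy features setting (preview).`
	})
}
```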
@@ -107,9 +107,15 @@ func newCreate() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = createJson.Unmarshal(&createReq)
-			if err != nil {
-				return err
+			diags := createJson.Unmarshal(&createReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		} else {
 			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
@@ -127,9 +127,15 @@ func newUpdate() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = updateJson.Unmarshal(&updateReq)
-			if err != nil {
-				return err
+			diags := updateJson.Unmarshal(&updateReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		} else {
 			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
@@ -97,9 +97,15 @@ func newCreate() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = createJson.Unmarshal(&createReq)
-			if err != nil {
-				return err
+			diags := createJson.Unmarshal(&createReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		}
 

@@ -358,9 +364,15 @@ func newPatch() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = patchJson.Unmarshal(&patchReq)
-			if err != nil {
-				return err
+			diags := patchJson.Unmarshal(&patchReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		}
 		if len(args) == 0 {

@@ -446,9 +458,15 @@ func newUpdate() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = updateJson.Unmarshal(&updateReq)
-			if err != nil {
-				return err
+			diags := updateJson.Unmarshal(&updateReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		}
 		if len(args) == 0 {

@@ -132,9 +132,15 @@ func newCreate() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = createJson.Unmarshal(&createReq)
-			if err != nil {
-				return err
+			diags := createJson.Unmarshal(&createReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		}
 		if !cmd.Flags().Changed("json") {

@@ -411,9 +417,15 @@ func newReplace() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = replaceJson.Unmarshal(&replaceReq)
-			if err != nil {
-				return err
+			diags := replaceJson.Unmarshal(&replaceReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		}
 		replaceReq.IpAccessListId = args[0]

@@ -505,9 +517,15 @@ func newUpdate() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = updateJson.Unmarshal(&updateReq)
-			if err != nil {
-				return err
+			diags := updateJson.Unmarshal(&updateReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		}
 		if len(args) == 0 {

@@ -162,9 +162,15 @@ func newCreate() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = createJson.Unmarshal(&createReq)
-			if err != nil {
-				return err
+			diags := createJson.Unmarshal(&createReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		}
 

@@ -369,9 +375,15 @@ func newPatchStatus() *cobra.Command {
 		a := root.AccountClient(ctx)
 
 		if cmd.Flags().Changed("json") {
-			err = patchStatusJson.Unmarshal(&patchStatusReq)
-			if err != nil {
-				return err
+			diags := patchStatusJson.Unmarshal(&patchStatusReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
 			}
 		}
 		patchStatusReq.LogDeliveryConfigurationId = args[0]

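For reference, each generated command registers its --json flag with the usage string `either inline JSON string or @path/to/file.json with request body`. Below is a rough sketch of how a flag like that can satisfy pflag's Value interface, assuming only the @-prefix convention stated in that usage string (illustrative only; the actual implementation is flags.JsonFlag in github.com/databricks/cli/libs/flags, whose Unmarshal returns diagnostics rather than the plain error used here):

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"strings"
)

// jsonFlag is a pflag.Value-style flag that accepts either an inline JSON
// string or an @path/to/file.json reference, mirroring the usage string the
// generated commands attach to their --json flag.
type jsonFlag struct {
	raw []byte
}

func (f *jsonFlag) String() string { return string(f.raw) }
func (f *jsonFlag) Type() string   { return "JSON" }

// Set captures the flag value, reading from a file when it starts with '@'.
func (f *jsonFlag) Set(v string) error {
	if strings.HasPrefix(v, "@") {
		buf, err := os.ReadFile(strings.TrimPrefix(v, "@"))
		if err != nil {
			return err
		}
		f.raw = buf
		return nil
	}
	f.raw = []byte(v)
	return nil
}

// Unmarshal decodes the captured payload into the request struct. The real
// flags.JsonFlag returns diagnostics here, as the hunks above show.
func (f *jsonFlag) Unmarshal(v any) error {
	return json.Unmarshal(f.raw, v)
}

func main() {
	var f jsonFlag
	if err := f.Set(`{"etag": "abc"}`); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	var req struct {
		Etag string `json:"etag"`
	}
	if err := f.Unmarshal(&req); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("etag:", req.Etag)
}

Keeping Set cheap and deferring decoding to Unmarshal means the payload is only validated when the flag was actually provided, which is exactly what the cmd.Flags().Changed("json") guard in each RunE body checks.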
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue