Merge remote-tracking branch 'databricks/main' into cp-encourage-root-path

Lennart Kats 2024-10-12 09:37:44 +02:00
commit 3f45f7179f
No known key found for this signature in database
GPG Key ID: 1EB8B57673197023
422 changed files with 17862 additions and 11882 deletions

View File

@ -11,10 +11,10 @@
"toolchain": {
"required": ["go"],
"post_generate": [
"go run ./bundle/internal/bundle/schema/main.go ./bundle/schema/docs/bundle_descriptions.json",
"go run ./bundle/internal/schema/*.go ./bundle/schema/jsonschema.json",
"echo 'bundle/internal/tf/schema/\\*.go linguist-generated=true' >> ./.gitattributes",
"echo 'go.sum linguist-generated=true' >> ./.gitattributes",
"echo 'bundle/schema/docs/bundle_descriptions.json linguist-generated=true' >> ./.gitattributes"
"echo 'bundle/schema/jsonschema.json linguist-generated=true' >> ./.gitattributes"
]
}
}

View File

@ -1 +1 @@
f98c07f9c71f579de65d2587bb0292f83d10e55d
0c86ea6dbd9a730c24ff0d4e509603e476955ac5

View File

@ -116,6 +116,10 @@ func allResolvers() *resolvers {
{{range .Services -}}
{{- if in $allowlist .KebabName -}}
r.{{.Singular.PascalName}} = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["{{.Singular.PascalName}}"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.{{.PascalName}}.GetBy{{range .NamedIdMap.NamePath}}{{.PascalName}}{{end}}(ctx, name)
if err != nil {
return "", err

View File

@ -5,6 +5,7 @@ package {{(.TrimPrefix "account").SnakeName}}
import (
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/cmd/root"
"github.com/databricks/databricks-sdk-go/service/{{.Package.Name}}"
"github.com/spf13/cobra"
@ -154,6 +155,7 @@ func new{{.PascalName}}() *cobra.Command {
"provider-exchanges delete-listing-from-exchange"
"provider-exchanges list-exchanges-for-listing"
"provider-exchanges list-listings-for-exchange"
"storage-credentials get"
-}}
{{- $fullCommandName := (print $serviceName " " .KebabName) -}}
{{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}
@ -230,10 +232,16 @@ func new{{.PascalName}}() *cobra.Command {
{{- if .Request }}
{{ if .CanUseJson }}
if cmd.Flags().Changed("json") {
err = {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req)
diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req)
if diags.HasError() {
return diags.Error()
}
if len(diags) > 0 {
err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
if err != nil {
return err
}
}
}{{end}}{{ if .MustUseJson }}else {
return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
}{{- end}}
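
The generated command body above fails fast when unmarshalling the `--json` payload produces errors, and renders any remaining non-error diagnostics (such as warnings) on stderr before continuing. A simplified, self-contained sketch of that control flow, using stand-in types rather than the real `libs/diag` and `libs/cmdio` packages:

```go
package main

import (
	"fmt"
	"os"
)

type severity int

const (
	sevError severity = iota
	sevWarning
)

type diagnostic struct {
	severity severity
	summary  string
}

type diagnostics []diagnostic

// hasError reports whether any diagnostic is an error.
func (ds diagnostics) hasError() bool {
	for _, d := range ds {
		if d.severity == sevError {
			return true
		}
	}
	return false
}

func main() {
	// Pretend unmarshalling returned a single warning.
	diags := diagnostics{{severity: sevWarning, summary: "unknown field: foo"}}

	// Same shape as the generated code: abort on errors, otherwise
	// surface warnings on stderr and proceed.
	if diags.hasError() {
		for _, d := range diags {
			if d.severity == sevError {
				fmt.Fprintln(os.Stderr, "Error:", d.summary)
			}
		}
		os.Exit(1)
	}
	for _, d := range diags {
		fmt.Fprintf(os.Stderr, "Warning: %s\n", d.summary)
	}
}
```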

.gitattributes (vendored): 8 changes
View File

@ -6,6 +6,7 @@ cmd/account/cmd.go linguist-generated=true
cmd/account/credentials/credentials.go linguist-generated=true
cmd/account/csp-enablement-account/csp-enablement-account.go linguist-generated=true
cmd/account/custom-app-integration/custom-app-integration.go linguist-generated=true
cmd/account/disable-legacy-features/disable-legacy-features.go linguist-generated=true
cmd/account/encryption-keys/encryption-keys.go linguist-generated=true
cmd/account/esm-enablement-account/esm-enablement-account.go linguist-generated=true
cmd/account/groups/groups.go linguist-generated=true
@ -52,6 +53,7 @@ cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true
cmd/workspace/dashboards/dashboards.go linguist-generated=true
cmd/workspace/data-sources/data-sources.go linguist-generated=true
cmd/workspace/default-namespace/default-namespace.go linguist-generated=true
cmd/workspace/disable-legacy-access/disable-legacy-access.go linguist-generated=true
cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go linguist-generated=true
cmd/workspace/experiments/experiments.go linguist-generated=true
cmd/workspace/external-locations/external-locations.go linguist-generated=true
@ -75,6 +77,8 @@ cmd/workspace/online-tables/online-tables.go linguist-generated=true
cmd/workspace/permission-migration/permission-migration.go linguist-generated=true
cmd/workspace/permissions/permissions.go linguist-generated=true
cmd/workspace/pipelines/pipelines.go linguist-generated=true
cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go linguist-generated=true
cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go linguist-generated=true
cmd/workspace/policy-families/policy-families.go linguist-generated=true
cmd/workspace/provider-exchange-filters/provider-exchange-filters.go linguist-generated=true
cmd/workspace/provider-exchanges/provider-exchanges.go linguist-generated=true
@ -94,6 +98,7 @@ cmd/workspace/recipient-activation/recipient-activation.go linguist-generated=tr
cmd/workspace/recipients/recipients.go linguist-generated=true
cmd/workspace/registered-models/registered-models.go linguist-generated=true
cmd/workspace/repos/repos.go linguist-generated=true
cmd/workspace/resource-quotas/resource-quotas.go linguist-generated=true
cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go linguist-generated=true
cmd/workspace/schemas/schemas.go linguist-generated=true
cmd/workspace/secrets/secrets.go linguist-generated=true
@ -105,6 +110,7 @@ cmd/workspace/storage-credentials/storage-credentials.go linguist-generated=true
cmd/workspace/system-schemas/system-schemas.go linguist-generated=true
cmd/workspace/table-constraints/table-constraints.go linguist-generated=true
cmd/workspace/tables/tables.go linguist-generated=true
cmd/workspace/temporary-table-credentials/temporary-table-credentials.go linguist-generated=true
cmd/workspace/token-management/token-management.go linguist-generated=true
cmd/workspace/tokens/tokens.go linguist-generated=true
cmd/workspace/users/users.go linguist-generated=true
@ -117,4 +123,4 @@ cmd/workspace/workspace-conf/workspace-conf.go linguist-generated=true
cmd/workspace/workspace/workspace.go linguist-generated=true
bundle/internal/tf/schema/\*.go linguist-generated=true
go.sum linguist-generated=true
bundle/schema/docs/bundle_descriptions.json linguist-generated=true
bundle/schema/jsonschema.json linguist-generated=true

View File

@ -33,7 +33,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.22.x
go-version: 1.22.7
- name: Setup Python
uses: actions/setup-python@v5
@ -68,7 +68,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.22.x
go-version: 1.22.7
# No need to download cached dependencies when running gofmt.
cache: false
@ -100,18 +100,25 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.22.x
go-version: 1.22.7
# Github repo: https://github.com/ajv-validator/ajv-cli
- name: Install ajv-cli
run: npm install -g ajv-cli@5.0.0
# Assert that the generated bundle schema is a valid JSON schema by using
# ajv-cli to validate it against a sample configuration file.
# ajv-cli to validate it against bundle configuration files.
# By default the ajv-cli runs in strict mode which will fail if the schema
# itself is not valid. Strict mode is more strict than the JSON schema
# specification. See for details: https://ajv.js.org/options.html#strict-mode-options
- name: Validate bundle schema
run: |
go run main.go bundle schema > schema.json
ajv -s schema.json -d ./bundle/tests/basic/databricks.yml
for file in ./bundle/internal/schema/testdata/pass/*.yml; do
ajv test -s schema.json -d $file --valid
done
for file in ./bundle/internal/schema/testdata/fail/*.yml; do
ajv test -s schema.json -d $file --invalid
done

View File

@ -21,7 +21,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.22.x
go-version: 1.22.7
# The default cache key for this action considers only the `go.sum` file.
# We include .goreleaser.yaml here to differentiate from the cache used by the push action

View File

@ -22,7 +22,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.22.x
go-version: 1.22.7
# The default cache key for this action considers only the `go.sum` file.
# We include .goreleaser.yaml here to differentiate from the cache used by the push action

View File

@ -1,5 +1,152 @@
# Version changelog
## [Release] Release v0.230.0
Notable changes for Databricks Asset Bundles:
Workspace paths are automatically prefixed with `/Workspace`. In addition, any usage of path strings such as `/Workspace/${workspace.root_path}/...` in bundle configuration is automatically replaced with `${workspace.root_path}/...`, and a warning is generated as part of `bundle validate`.
More details can be found here: https://docs.databricks.com/en/release-notes/dev-tools/bundles.html#workspace-paths
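As a rough illustration of the substitution, here is a minimal sketch assuming a plain string rewrite (the CLI's actual implementation is a bundle mutator, not this function):

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeWorkspacePath rewrites a hard-coded "/Workspace" prefix in front of
// ${workspace.root_path} to the bare variable reference. The boolean reports
// whether a rewrite happened, so the caller can emit a validation warning.
func normalizeWorkspacePath(s string) (string, bool) {
	const prefixed = "/Workspace/${workspace.root_path}"
	if strings.HasPrefix(s, prefixed) {
		return "${workspace.root_path}" + strings.TrimPrefix(s, prefixed), true
	}
	return s, false
}

func main() {
	out, rewritten := normalizeWorkspacePath("/Workspace/${workspace.root_path}/files/src")
	if rewritten {
		fmt.Println("warning: redundant /Workspace prefix was removed")
	}
	fmt.Println(out) // ${workspace.root_path}/files/src
}
```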
Bundles:
* Add an error if state files grow bigger than the export limit ([#1795](https://github.com/databricks/cli/pull/1795)).
* Always prepend bundle remote paths with /Workspace ([#1724](https://github.com/databricks/cli/pull/1724)).
* Add resource path field to bundle workspace configuration ([#1800](https://github.com/databricks/cli/pull/1800)).
* Add validation for files with a `.(resource-name).yml` extension ([#1780](https://github.com/databricks/cli/pull/1780)).
Internal:
* Remove deprecated or readonly fields from the bundle schema ([#1809](https://github.com/databricks/cli/pull/1809)).
API Changes:
* Changed `databricks git-credentials create`, `databricks git-credentials delete`, `databricks git-credentials get`, `databricks git-credentials list`, `databricks git-credentials update` commands.
* Changed `databricks repos create`, `databricks repos delete`, `databricks repos get`, `databricks repos update` commands.
OpenAPI commit 0c86ea6dbd9a730c24ff0d4e509603e476955ac5 (2024-10-02)
Dependency updates:
* Upgrade TF provider to 1.53.0 ([#1815](https://github.com/databricks/cli/pull/1815)).
* Bump golang.org/x/term from 0.24.0 to 0.25.0 ([#1811](https://github.com/databricks/cli/pull/1811)).
* Bump golang.org/x/text from 0.18.0 to 0.19.0 ([#1812](https://github.com/databricks/cli/pull/1812)).
* Bump github.com/databricks/databricks-sdk-go from 0.47.0 to 0.48.0 ([#1810](https://github.com/databricks/cli/pull/1810)).
## [Release] Release v0.229.0
Bundles:
* Added support for creating all-purpose clusters ([#1698](https://github.com/databricks/cli/pull/1698)).
* Reduce time until the prompt is shown for bundle run ([#1727](https://github.com/databricks/cli/pull/1727)).
* Use Unity Catalog for pipelines in the default-python template ([#1766](https://github.com/databricks/cli/pull/1766)).
* Add verbose flag to the "bundle deploy" command ([#1774](https://github.com/databricks/cli/pull/1774)).
* Fixed full variable override detection ([#1787](https://github.com/databricks/cli/pull/1787)).
* Add sub-extension to resource files in built-in templates ([#1777](https://github.com/databricks/cli/pull/1777)).
* Fix panic in `apply_presets.go` ([#1796](https://github.com/databricks/cli/pull/1796)).
Internal:
* Assert tokens are redacted in origin URL when username is not specified ([#1785](https://github.com/databricks/cli/pull/1785)).
* Refactor jobs path translation ([#1782](https://github.com/databricks/cli/pull/1782)).
* Add JobTaskClusterSpec validate mutator ([#1784](https://github.com/databricks/cli/pull/1784)).
* Pin Go toolchain to 1.22.7 ([#1790](https://github.com/databricks/cli/pull/1790)).
* Modify SetLocation test utility to take full locations as argument ([#1788](https://github.com/databricks/cli/pull/1788)).
* Simplified isFullVariableOverrideDef implementation ([#1791](https://github.com/databricks/cli/pull/1791)).
* Sort tasks by `task_key` before generating the Terraform configuration ([#1776](https://github.com/databricks/cli/pull/1776)).
* Trim trailing whitespace ([#1794](https://github.com/databricks/cli/pull/1794)).
* Move trampoline code into trampoline package ([#1793](https://github.com/databricks/cli/pull/1793)).
* Rename `RootPath` -> `BundleRootPath` ([#1792](https://github.com/databricks/cli/pull/1792)).
API Changes:
* Changed `databricks apps delete` command to return .
* Changed `databricks apps deploy` command with new required argument order.
* Changed `databricks apps start` command to return .
* Changed `databricks apps stop` command to return .
* Added `databricks temporary-table-credentials` command group.
* Added `databricks serving-endpoints put-ai-gateway` command.
* Added `databricks disable-legacy-access` command group.
* Added `databricks account disable-legacy-features` command group.
OpenAPI commit 6f6b1371e640f2dfeba72d365ac566368656f6b6 (2024-09-19)
Dependency updates:
* Upgrade to Go SDK 0.47.0 ([#1799](https://github.com/databricks/cli/pull/1799)).
* Upgrade to TF provider 1.52 ([#1781](https://github.com/databricks/cli/pull/1781)).
* Bump golang.org/x/mod from 0.20.0 to 0.21.0 ([#1758](https://github.com/databricks/cli/pull/1758)).
* Bump github.com/hashicorp/hc-install from 0.7.0 to 0.9.0 ([#1772](https://github.com/databricks/cli/pull/1772)).
## [Release] Release v0.228.1
Bundles:
* Added listing cluster filtering for cluster lookups ([#1754](https://github.com/databricks/cli/pull/1754)).
* Expand library globs relative to the sync root ([#1756](https://github.com/databricks/cli/pull/1756)).
* Fixed generated YAML missing 'default' for empty values ([#1765](https://github.com/databricks/cli/pull/1765)).
* Use periodic triggers in all templates ([#1739](https://github.com/databricks/cli/pull/1739)).
* Use the friendly name of service principals when shortening their name ([#1770](https://github.com/databricks/cli/pull/1770)).
* Fixed detecting full syntax variable override which includes type field ([#1775](https://github.com/databricks/cli/pull/1775)).
Internal:
* Pass copy of `dyn.Path` to callback function ([#1747](https://github.com/databricks/cli/pull/1747)).
* Make bundle JSON schema modular with `` ([#1700](https://github.com/databricks/cli/pull/1700)).
* Alias variables block in the `Target` struct ([#1748](https://github.com/databricks/cli/pull/1748)).
* Add end to end integration tests for bundle JSON schema ([#1726](https://github.com/databricks/cli/pull/1726)).
* Fix artifact upload integration tests ([#1767](https://github.com/databricks/cli/pull/1767)).
API Changes:
* Added `databricks quality-monitors regenerate-dashboard` command.
OpenAPI commit d05898328669a3f8ab0c2ecee37db2673d3ea3f7 (2024-09-04)
Dependency updates:
* Bump golang.org/x/term from 0.23.0 to 0.24.0 ([#1757](https://github.com/databricks/cli/pull/1757)).
* Bump golang.org/x/oauth2 from 0.22.0 to 0.23.0 ([#1761](https://github.com/databricks/cli/pull/1761)).
* Bump golang.org/x/text from 0.17.0 to 0.18.0 ([#1759](https://github.com/databricks/cli/pull/1759)).
* Bump github.com/databricks/databricks-sdk-go from 0.45.0 to 0.46.0 ([#1760](https://github.com/databricks/cli/pull/1760)).
## [Release] Release v0.228.0
CLI:
* Do not error if we cannot prompt for a profile in `auth login` ([#1745](https://github.com/databricks/cli/pull/1745)).
Bundles:
As of this release, the CLI will show a prompt if there are configuration changes that lead to DLT pipeline recreation.
Users can skip the prompt by specifying the `--auto-approve` flag.
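For example, running `databricks bundle deploy --auto-approve` deploys without pausing for confirmation.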
* Pass along to Terraform process ([#1734](https://github.com/databricks/cli/pull/1734)).
* Add prompt when a pipeline recreation happens ([#1672](https://github.com/databricks/cli/pull/1672)).
* Use materialized views in the default-sql template ([#1709](https://github.com/databricks/cli/pull/1709)).
* Update templates to latest LTS DBR ([#1715](https://github.com/databricks/cli/pull/1715)).
* Make lock optional in the JSON schema ([#1738](https://github.com/databricks/cli/pull/1738)).
* Do not suppress normalisation diagnostics for resolving variables ([#1740](https://github.com/databricks/cli/pull/1740)).
* Include a permissions section in all templates ([#1713](https://github.com/databricks/cli/pull/1713)).
* Fixed complex variables are not being correctly merged from include files ([#1746](https://github.com/databricks/cli/pull/1746)).
* Fixed variable override in target with full variable syntax ([#1749](https://github.com/databricks/cli/pull/1749)).
Internal:
* Consider serverless clusters as compatible for Python wheel tasks ([#1733](https://github.com/databricks/cli/pull/1733)).
* PythonMutator: explain missing package error ([#1736](https://github.com/databricks/cli/pull/1736)).
* Add `dyn.Time` to box a timestamp with its original string value ([#1732](https://github.com/databricks/cli/pull/1732)).
* Fix streaming of stdout, stdin, stderr in cobra test runner ([#1742](https://github.com/databricks/cli/pull/1742)).
Dependency updates:
* Bump github.com/Masterminds/semver/v3 from 3.2.1 to 3.3.0 ([#1741](https://github.com/databricks/cli/pull/1741)).
## [Release] Release v0.227.1
CLI:
* Disable prompt for storage-credentials get command ([#1723](https://github.com/databricks/cli/pull/1723)).
Bundles:
* Do not treat empty path as a local path ([#1717](https://github.com/databricks/cli/pull/1717)).
* Correctly mark PyPI package name specs with multiple specifiers as remote libraries ([#1725](https://github.com/databricks/cli/pull/1725)).
* Improve error handling for /Volumes paths in mode: development ([#1716](https://github.com/databricks/cli/pull/1716)).
Internal:
* Ignore CLI version check on development builds of the CLI ([#1714](https://github.com/databricks/cli/pull/1714)).
API Changes:
* Added `databricks resource-quotas` command group.
* Added `databricks policy-compliance-for-clusters` command group.
* Added `databricks policy-compliance-for-jobs` command group.
OpenAPI commit 3eae49b444cac5a0118a3503e5b7ecef7f96527a (2024-08-21)
Dependency updates:
* Bump github.com/databricks/databricks-sdk-go from 0.44.0 to 0.45.0 ([#1719](https://github.com/databricks/cli/pull/1719)).
* Revert hc-install version to 0.7.0 ([#1711](https://github.com/databricks/cli/pull/1711)).
## [Release] Release v0.227.0
CLI:

View File

@ -33,12 +33,7 @@ func createGlobError(v dyn.Value, p dyn.Path, message string) diag.Diagnostic {
Severity: diag.Error,
Summary: fmt.Sprintf("%s: %s", source, message),
Locations: []dyn.Location{v.Location()},
Paths: []dyn.Path{
// Hack to clone the path. This path copy is mutable.
// To be addressed in a later PR.
p.Append(),
},
Paths: []dyn.Path{p},
}
}

View File

@ -10,6 +10,7 @@ import (
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/internal/bundletest"
"github.com/databricks/cli/internal/testutil"
"github.com/databricks/cli/libs/dyn"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -23,7 +24,7 @@ func TestExpandGlobs_Nominal(t *testing.T) {
testutil.Touch(t, tmpDir, "bc.txt")
b := &bundle.Bundle{
RootPath: tmpDir,
BundleRootPath: tmpDir,
Config: config.Root{
Artifacts: config.Artifacts{
"test": {
@ -36,7 +37,7 @@ func TestExpandGlobs_Nominal(t *testing.T) {
},
}
bundletest.SetLocation(b, "artifacts", filepath.Join(tmpDir, "databricks.yml"))
bundletest.SetLocation(b, "artifacts", []dyn.Location{{File: filepath.Join(tmpDir, "databricks.yml")}})
ctx := context.Background()
diags := bundle.Apply(ctx, b, bundle.Seq(
@ -62,7 +63,7 @@ func TestExpandGlobs_InvalidPattern(t *testing.T) {
tmpDir := t.TempDir()
b := &bundle.Bundle{
RootPath: tmpDir,
BundleRootPath: tmpDir,
Config: config.Root{
Artifacts: config.Artifacts{
"test": {
@ -77,7 +78,7 @@ func TestExpandGlobs_InvalidPattern(t *testing.T) {
},
}
bundletest.SetLocation(b, "artifacts", filepath.Join(tmpDir, "databricks.yml"))
bundletest.SetLocation(b, "artifacts", []dyn.Location{{File: filepath.Join(tmpDir, "databricks.yml")}})
ctx := context.Background()
diags := bundle.Apply(ctx, b, bundle.Seq(
@ -110,7 +111,7 @@ func TestExpandGlobs_NoMatches(t *testing.T) {
testutil.Touch(t, tmpDir, "b2.txt")
b := &bundle.Bundle{
RootPath: tmpDir,
BundleRootPath: tmpDir,
Config: config.Root{
Artifacts: config.Artifacts{
"test": {
@ -125,7 +126,7 @@ func TestExpandGlobs_NoMatches(t *testing.T) {
},
}
bundletest.SetLocation(b, "artifacts", filepath.Join(tmpDir, "databricks.yml"))
bundletest.SetLocation(b, "artifacts", []dyn.Location{{File: filepath.Join(tmpDir, "databricks.yml")}})
ctx := context.Background()
diags := bundle.Apply(ctx, b, bundle.Seq(

View File

@ -47,7 +47,7 @@ func (m *prepare) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
// If artifact path is not provided, use bundle root dir
if artifact.Path == "" {
artifact.Path = b.RootPath
artifact.Path = b.BundleRootPath
}
if !filepath.IsAbs(artifact.Path) {

View File

@ -35,21 +35,21 @@ func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
log.Infof(ctx, "Detecting Python wheel project...")
// checking if there is setup.py in the bundle root
setupPy := filepath.Join(b.RootPath, "setup.py")
setupPy := filepath.Join(b.BundleRootPath, "setup.py")
_, err := os.Stat(setupPy)
if err != nil {
log.Infof(ctx, "No Python wheel project found at bundle root folder")
return nil
}
log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.RootPath))
log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.BundleRootPath))
module := extractModuleName(setupPy)
if b.Config.Artifacts == nil {
b.Config.Artifacts = make(map[string]*config.Artifact)
}
pkgPath, err := filepath.Abs(b.RootPath)
pkgPath, err := filepath.Abs(b.BundleRootPath)
if err != nil {
return diag.FromErr(err)
}

View File

@ -31,22 +31,26 @@ import (
const internalFolder = ".internal"
type Bundle struct {
// RootPath contains the directory path to the root of the bundle.
// BundleRootPath is the local path to the root directory of the bundle.
// It is set when we instantiate a new bundle instance.
RootPath string
BundleRootPath string
// BundleRoot is a virtual filesystem path to the root of the bundle.
// BundleRoot is a virtual filesystem path to [BundleRootPath].
// Exclusively use this field for filesystem operations.
BundleRoot vfs.Path
// SyncRoot is a virtual filesystem path to the root directory of the files that are synchronized to the workspace.
// It can be an ancestor to [BundleRoot], but not a descendant; that is, [SyncRoot] must contain [BundleRoot].
SyncRoot vfs.Path
// SyncRootPath is the local path to the root directory of files that are synchronized to the workspace.
// It is equal to `SyncRoot.Native()` and is included as a dedicated field for convenient access.
// By default, it is the same as [BundleRootPath].
// If it is different, it must be an ancestor to [BundleRootPath].
// That is, [SyncRootPath] must contain [BundleRootPath].
SyncRootPath string
// SyncRoot is a virtual filesystem path to [SyncRootPath].
// Exclusively use this field for filesystem operations.
SyncRoot vfs.Path
// Config contains the bundle configuration.
// It is loaded from the bundle configuration files and mutators may update it.
Config config.Root
// Metadata about the bundle deployment. This is the interface Databricks services
@ -84,14 +88,14 @@ type Bundle struct {
func Load(ctx context.Context, path string) (*Bundle, error) {
b := &Bundle{
RootPath: filepath.Clean(path),
BundleRootPath: filepath.Clean(path),
BundleRoot: vfs.MustNew(path),
}
configFile, err := config.FileNames.FindInPath(path)
if err != nil {
return nil, err
}
log.Debugf(ctx, "Found bundle root at %s (file %s)", b.RootPath, configFile)
log.Debugf(ctx, "Found bundle root at %s (file %s)", b.BundleRootPath, configFile)
return b, nil
}
@ -160,7 +164,7 @@ func (b *Bundle) CacheDir(ctx context.Context, paths ...string) (string, error)
if !exists || cacheDirName == "" {
cacheDirName = filepath.Join(
// Anchor at bundle root directory.
b.RootPath,
b.BundleRootPath,
// Static cache directory.
".databricks",
"bundle",
@ -212,7 +216,7 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) {
if err != nil {
return nil, err
}
internalDirRel, err := filepath.Rel(b.RootPath, internalDir)
internalDirRel, err := filepath.Rel(b.BundleRootPath, internalDir)
if err != nil {
return nil, err
}
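
The comments above establish an invariant between the two roots: the sync root must contain the bundle root. A hypothetical containment check, separate from the CLI's own code:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// contains reports whether child is equal to or nested under parent.
func contains(parent, child string) bool {
	rel, err := filepath.Rel(parent, child)
	if err != nil {
		return false
	}
	// child is inside parent iff the relative path does not escape upward.
	return rel == "." || (!strings.HasPrefix(rel, "..") && !filepath.IsAbs(rel))
}

func main() {
	fmt.Println(contains("/repo", "/repo/my_bundle")) // true: valid SyncRootPath/BundleRootPath pair
	fmt.Println(contains("/repo/my_bundle", "/repo")) // false: sync root may not be a descendant
}
```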

View File

@ -21,7 +21,7 @@ func (r ReadOnlyBundle) Config() config.Root {
}
func (r ReadOnlyBundle) RootPath() string {
return r.b.RootPath
return r.b.BundleRootPath
}
func (r ReadOnlyBundle) BundleRoot() vfs.Path {

View File

@ -79,7 +79,7 @@ func TestBundleMustLoadSuccess(t *testing.T) {
t.Setenv(env.RootVariable, "./tests/basic")
b, err := MustLoad(context.Background())
require.NoError(t, err)
assert.Equal(t, "tests/basic", filepath.ToSlash(b.RootPath))
assert.Equal(t, "tests/basic", filepath.ToSlash(b.BundleRootPath))
}
func TestBundleMustLoadFailureWithEnv(t *testing.T) {
@ -98,7 +98,7 @@ func TestBundleTryLoadSuccess(t *testing.T) {
t.Setenv(env.RootVariable, "./tests/basic")
b, err := TryLoad(context.Background())
require.NoError(t, err)
assert.Equal(t, "tests/basic", filepath.ToSlash(b.RootPath))
assert.Equal(t, "tests/basic", filepath.ToSlash(b.BundleRootPath))
}
func TestBundleTryLoadFailureWithEnv(t *testing.T) {

View File

@ -38,8 +38,11 @@ type Bundle struct {
// Annotated readonly as this should be set at the target level.
Mode Mode `json:"mode,omitempty" bundle:"readonly"`
// Overrides the compute used for jobs and other supported assets.
ComputeID string `json:"compute_id,omitempty"`
// DEPRECATED: Overrides the compute used for jobs and other supported assets.
ComputeId string `json:"compute_id,omitempty"`
// Overrides the cluster used for jobs and other supported assets.
ClusterId string `json:"cluster_id,omitempty"`
// Deployment section specifies deployment related configuration for bundle
Deployment Deployment `json:"deployment,omitempty"`

View File

@ -6,5 +6,5 @@ type Deployment struct {
FailOnActiveRuns bool `json:"fail_on_active_runs,omitempty"`
// Lock configures locking behavior on deployment.
Lock Lock `json:"lock"`
Lock Lock `json:"lock,omitempty"`
}

View File

@ -25,6 +25,20 @@ func ConvertJobToValue(job *jobs.Job) (dyn.Value, error) {
value["tasks"] = dyn.NewValue(tasks, []dyn.Location{{Line: jobOrder.Get("tasks")}})
}
// We're processing job.Settings.Parameters separately to retain empty default values.
if len(job.Settings.Parameters) > 0 {
params := make([]dyn.Value, 0)
for _, parameter := range job.Settings.Parameters {
p := map[string]dyn.Value{
"name": dyn.NewValue(parameter.Name, []dyn.Location{{Line: 0}}), // We use Line: 0 to ensure that the name goes first.
"default": dyn.NewValue(parameter.Default, []dyn.Location{{Line: 1}}),
}
params = append(params, dyn.NewValue(p, []dyn.Location{}))
}
value["parameters"] = dyn.NewValue(params, []dyn.Location{{Line: jobOrder.Get("parameters")}})
}
return yamlsaver.ConvertToMapValue(job.Settings, jobOrder, []string{"format", "new_cluster", "existing_cluster_id"}, value)
}
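
The `Line` values above serve as sort keys: the YAML saver emits map keys in ascending line order, so pinning `name` to line 0 and `default` to line 1 fixes their output order. A tiny sketch of that ordering idea, simplified from the dyn/yamlsaver machinery:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Assign each key the synthetic "line" it should appear on.
	order := map[string]int{"name": 0, "default": 1}

	keys := make([]string, 0, len(order))
	for k := range order {
		keys = append(keys, k)
	}
	// Emit keys in ascending line order, as the YAML saver does.
	sort.Slice(keys, func(i, j int) bool { return order[keys[i]] < order[keys[j]] })
	fmt.Println(keys) // [name default]
}
```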

View File

@ -20,7 +20,7 @@ func (m *entryPoint) Name() string {
}
func (m *entryPoint) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
path, err := config.FileNames.FindInPath(b.RootPath)
path, err := config.FileNames.FindInPath(b.BundleRootPath)
if err != nil {
return diag.FromErr(err)
}

View File

@ -18,7 +18,7 @@ func TestEntryPointNoRootPath(t *testing.T) {
func TestEntryPoint(t *testing.T) {
b := &bundle.Bundle{
RootPath: "testdata",
BundleRootPath: "testdata/basic",
}
diags := bundle.Apply(context.Background(), b, loader.EntryPoint())
require.NoError(t, diags.Error())

View File

@ -3,12 +3,135 @@ package loader
import (
"context"
"fmt"
"slices"
"sort"
"strings"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
)
func validateFileFormat(configRoot dyn.Value, filePath string) diag.Diagnostics {
for _, resourceDescription := range config.SupportedResources() {
singularName := resourceDescription.SingularName
for _, yamlExt := range []string{"yml", "yaml"} {
ext := fmt.Sprintf(".%s.%s", singularName, yamlExt)
if strings.HasSuffix(filePath, ext) {
return validateSingleResourceDefined(configRoot, ext, singularName)
}
}
}
return nil
}
func validateSingleResourceDefined(configRoot dyn.Value, ext, typ string) diag.Diagnostics {
type resource struct {
path dyn.Path
value dyn.Value
typ string
key string
}
resources := []resource{}
supportedResources := config.SupportedResources()
// Gather all resources defined in the resources block.
_, err := dyn.MapByPattern(
configRoot,
dyn.NewPattern(dyn.Key("resources"), dyn.AnyKey(), dyn.AnyKey()),
func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
// The key for the resource, e.g. "my_job" for jobs.my_job.
k := p[2].Key()
// The type of the resource, e.g. "job" for jobs.my_job.
typ := supportedResources[p[1].Key()].SingularName
resources = append(resources, resource{path: p, value: v, typ: typ, key: k})
return v, nil
})
if err != nil {
return diag.FromErr(err)
}
// Gather all resources defined in a target block.
_, err = dyn.MapByPattern(
configRoot,
dyn.NewPattern(dyn.Key("targets"), dyn.AnyKey(), dyn.Key("resources"), dyn.AnyKey(), dyn.AnyKey()),
func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
// The key for the resource, e.g. "my_job" for jobs.my_job.
k := p[4].Key()
// The type of the resource, e.g. "job" for jobs.my_job.
typ := supportedResources[p[3].Key()].SingularName
resources = append(resources, resource{path: p, value: v, typ: typ, key: k})
return v, nil
})
if err != nil {
return diag.FromErr(err)
}
typeMatch := true
seenKeys := map[string]struct{}{}
for _, rr := range resources {
// case: The resource is not of the correct type.
if rr.typ != typ {
typeMatch = false
break
}
seenKeys[rr.key] = struct{}{}
}
// Format matches. There's at most one resource defined in the file.
// The resource is also of the correct type.
if typeMatch && len(seenKeys) <= 1 {
return nil
}
detail := strings.Builder{}
detail.WriteString("The following resources are defined or configured in this file:\n")
lines := []string{}
for _, r := range resources {
lines = append(lines, fmt.Sprintf(" - %s (%s)\n", r.key, r.typ))
}
// Sort the lines to print to make the output deterministic.
sort.Strings(lines)
// Compact the lines before writing them to the message to remove any duplicate lines.
// This is needed because we do not dedup earlier when gathering the resources
// and it's valid to define the same resource in both the resources and targets block.
lines = slices.Compact(lines)
for _, l := range lines {
detail.WriteString(l)
}
locations := []dyn.Location{}
paths := []dyn.Path{}
for _, rr := range resources {
locations = append(locations, rr.value.Locations()...)
paths = append(paths, rr.path)
}
// Sort the locations and paths to make the output deterministic.
sort.Slice(locations, func(i, j int) bool {
return locations[i].String() < locations[j].String()
})
sort.Slice(paths, func(i, j int) bool {
return paths[i].String() < paths[j].String()
})
return diag.Diagnostics{
{
Severity: diag.Recommendation,
Summary: fmt.Sprintf("define a single %s in a file with the %s extension.", strings.ReplaceAll(typ, "_", " "), ext),
Detail: detail.String(),
Locations: locations,
Paths: paths,
},
}
}
type processInclude struct {
fullPath string
relPath string
@ -31,6 +154,13 @@ func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnos
if diags.HasError() {
return diags
}
// Add any diagnostics associated with the file format.
diags = append(diags, validateFileFormat(this.Value(), m.relPath)...)
if diags.HasError() {
return diags
}
err := b.Config.Merge(this)
if err != nil {
diags = diags.Extend(diag.FromErr(err))

View File

@ -8,13 +8,15 @@ import (
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/loader"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestProcessInclude(t *testing.T) {
b := &bundle.Bundle{
RootPath: "testdata",
BundleRootPath: "testdata/basic",
Config: config.Root{
Workspace: config.Workspace{
Host: "foo",
@ -22,7 +24,7 @@ func TestProcessInclude(t *testing.T) {
},
}
m := loader.ProcessInclude(filepath.Join(b.RootPath, "host.yml"), "host.yml")
m := loader.ProcessInclude(filepath.Join(b.BundleRootPath, "host.yml"), "host.yml")
assert.Equal(t, "ProcessInclude(host.yml)", m.Name())
// Assert the host value prior to applying the mutator
@ -33,3 +35,184 @@ func TestProcessInclude(t *testing.T) {
require.NoError(t, diags.Error())
assert.Equal(t, "bar", b.Config.Workspace.Host)
}
func TestProcessIncludeFormatMatch(t *testing.T) {
for _, fileName := range []string{
"one_job.job.yml",
"one_pipeline.pipeline.yaml",
"two_job.yml",
"job_and_pipeline.yml",
"multiple_resources.yml",
} {
t.Run(fileName, func(t *testing.T) {
b := &bundle.Bundle{
BundleRootPath: "testdata/format_match",
Config: config.Root{
Bundle: config.Bundle{
Name: "format_test",
},
},
}
m := loader.ProcessInclude(filepath.Join(b.BundleRootPath, fileName), fileName)
diags := bundle.Apply(context.Background(), b, m)
assert.Empty(t, diags)
})
}
}
func TestProcessIncludeFormatNotMatch(t *testing.T) {
for fileName, expectedDiags := range map[string]diag.Diagnostics{
"single_job.pipeline.yaml": {
{
Severity: diag.Recommendation,
Summary: "define a single pipeline in a file with the .pipeline.yaml extension.",
Detail: "The following resources are defined or configured in this file:\n - job1 (job)\n",
Locations: []dyn.Location{
{File: filepath.FromSlash("testdata/format_not_match/single_job.pipeline.yaml"), Line: 11, Column: 11},
{File: filepath.FromSlash("testdata/format_not_match/single_job.pipeline.yaml"), Line: 4, Column: 7},
},
Paths: []dyn.Path{
dyn.MustPathFromString("resources.jobs.job1"),
dyn.MustPathFromString("targets.target1.resources.jobs.job1"),
},
},
},
"job_and_pipeline.job.yml": {
{
Severity: diag.Recommendation,
Summary: "define a single job in a file with the .job.yml extension.",
Detail: "The following resources are defined or configured in this file:\n - job1 (job)\n - pipeline1 (pipeline)\n",
Locations: []dyn.Location{
{File: filepath.FromSlash("testdata/format_not_match/job_and_pipeline.job.yml"), Line: 11, Column: 11},
{File: filepath.FromSlash("testdata/format_not_match/job_and_pipeline.job.yml"), Line: 4, Column: 7},
},
Paths: []dyn.Path{
dyn.MustPathFromString("resources.pipelines.pipeline1"),
dyn.MustPathFromString("targets.target1.resources.jobs.job1"),
},
},
},
"job_and_pipeline.experiment.yml": {
{
Severity: diag.Recommendation,
Summary: "define a single experiment in a file with the .experiment.yml extension.",
Detail: "The following resources are defined or configured in this file:\n - job1 (job)\n - pipeline1 (pipeline)\n",
Locations: []dyn.Location{
{File: filepath.FromSlash("testdata/format_not_match/job_and_pipeline.experiment.yml"), Line: 11, Column: 11},
{File: filepath.FromSlash("testdata/format_not_match/job_and_pipeline.experiment.yml"), Line: 4, Column: 7},
},
Paths: []dyn.Path{
dyn.MustPathFromString("resources.pipelines.pipeline1"),
dyn.MustPathFromString("targets.target1.resources.jobs.job1"),
},
},
},
"two_jobs.job.yml": {
{
Severity: diag.Recommendation,
Summary: "define a single job in a file with the .job.yml extension.",
Detail: "The following resources are defined or configured in this file:\n - job1 (job)\n - job2 (job)\n",
Locations: []dyn.Location{
{File: filepath.FromSlash("testdata/format_not_match/two_jobs.job.yml"), Line: 4, Column: 7},
{File: filepath.FromSlash("testdata/format_not_match/two_jobs.job.yml"), Line: 7, Column: 7},
},
Paths: []dyn.Path{
dyn.MustPathFromString("resources.jobs.job1"),
dyn.MustPathFromString("resources.jobs.job2"),
},
},
},
"second_job_in_target.job.yml": {
{
Severity: diag.Recommendation,
Summary: "define a single job in a file with the .job.yml extension.",
Detail: "The following resources are defined or configured in this file:\n - job1 (job)\n - job2 (job)\n",
Locations: []dyn.Location{
{File: filepath.FromSlash("testdata/format_not_match/second_job_in_target.job.yml"), Line: 11, Column: 11},
{File: filepath.FromSlash("testdata/format_not_match/second_job_in_target.job.yml"), Line: 4, Column: 7},
},
Paths: []dyn.Path{
dyn.MustPathFromString("resources.jobs.job1"),
dyn.MustPathFromString("targets.target1.resources.jobs.job2"),
},
},
},
"two_jobs_in_target.job.yml": {
{
Severity: diag.Recommendation,
Summary: "define a single job in a file with the .job.yml extension.",
Detail: "The following resources are defined or configured in this file:\n - job1 (job)\n - job2 (job)\n",
Locations: []dyn.Location{
{File: filepath.FromSlash("testdata/format_not_match/two_jobs_in_target.job.yml"), Line: 6, Column: 11},
{File: filepath.FromSlash("testdata/format_not_match/two_jobs_in_target.job.yml"), Line: 8, Column: 11},
},
Paths: []dyn.Path{
dyn.MustPathFromString("targets.target1.resources.jobs.job1"),
dyn.MustPathFromString("targets.target1.resources.jobs.job2"),
},
},
},
"multiple_resources.model_serving_endpoint.yml": {
{
Severity: diag.Recommendation,
Summary: "define a single model serving endpoint in a file with the .model_serving_endpoint.yml extension.",
Detail: `The following resources are defined or configured in this file:
- experiment1 (experiment)
- job1 (job)
- job2 (job)
- job3 (job)
- model1 (model)
- model_serving_endpoint1 (model_serving_endpoint)
- pipeline1 (pipeline)
- pipeline2 (pipeline)
- quality_monitor1 (quality_monitor)
- registered_model1 (registered_model)
- schema1 (schema)
`,
Locations: []dyn.Location{
{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 12, Column: 7},
{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 14, Column: 7},
{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 18, Column: 7},
{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 22, Column: 7},
{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 24, Column: 7},
{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 28, Column: 7},
{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 35, Column: 11},
{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 39, Column: 11},
{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 43, Column: 11},
{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 4, Column: 7},
{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 8, Column: 7},
},
Paths: []dyn.Path{
dyn.MustPathFromString("resources.experiments.experiment1"),
dyn.MustPathFromString("resources.jobs.job1"),
dyn.MustPathFromString("resources.jobs.job2"),
dyn.MustPathFromString("resources.model_serving_endpoints.model_serving_endpoint1"),
dyn.MustPathFromString("resources.models.model1"),
dyn.MustPathFromString("resources.pipelines.pipeline1"),
dyn.MustPathFromString("resources.pipelines.pipeline2"),
dyn.MustPathFromString("resources.schemas.schema1"),
dyn.MustPathFromString("targets.target1.resources.jobs.job3"),
dyn.MustPathFromString("targets.target1.resources.quality_monitors.quality_monitor1"),
dyn.MustPathFromString("targets.target1.resources.registered_models.registered_model1"),
},
},
},
} {
t.Run(fileName, func(t *testing.T) {
b := &bundle.Bundle{
BundleRootPath: "testdata/format_not_match",
Config: config.Root{
Bundle: config.Bundle{
Name: "format_test",
},
},
}
m := loader.ProcessInclude(filepath.Join(b.BundleRootPath, fileName), fileName)
diags := bundle.Apply(context.Background(), b, m)
require.Len(t, diags, 1)
assert.Equal(t, expectedDiags, diags)
})
}
}

View File

@ -47,7 +47,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.
}
// Anchor includes to the bundle root path.
matches, err := filepath.Glob(filepath.Join(b.RootPath, entry))
matches, err := filepath.Glob(filepath.Join(b.BundleRootPath, entry))
if err != nil {
return diag.FromErr(err)
}
@ -61,7 +61,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.
// Filter matches to ones we haven't seen yet.
var includes []string
for _, match := range matches {
rel, err := filepath.Rel(b.RootPath, match)
rel, err := filepath.Rel(b.BundleRootPath, match)
if err != nil {
return diag.FromErr(err)
}
@ -76,7 +76,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.
slices.Sort(includes)
files = append(files, includes...)
for _, include := range includes {
out = append(out, ProcessInclude(filepath.Join(b.RootPath, include), include))
out = append(out, ProcessInclude(filepath.Join(b.BundleRootPath, include), include))
}
}

View File

@ -15,7 +15,7 @@ import (
func TestProcessRootIncludesEmpty(t *testing.T) {
b := &bundle.Bundle{
RootPath: ".",
BundleRootPath: ".",
}
diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
require.NoError(t, diags.Error())
@ -30,7 +30,7 @@ func TestProcessRootIncludesAbs(t *testing.T) {
}
b := &bundle.Bundle{
RootPath: ".",
BundleRootPath: ".",
Config: config.Root{
Include: []string{
"/tmp/*.yml",
@ -44,7 +44,7 @@ func TestProcessRootIncludesAbs(t *testing.T) {
func TestProcessRootIncludesSingleGlob(t *testing.T) {
b := &bundle.Bundle{
RootPath: t.TempDir(),
BundleRootPath: t.TempDir(),
Config: config.Root{
Include: []string{
"*.yml",
@ -52,9 +52,9 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) {
},
}
testutil.Touch(t, b.RootPath, "databricks.yml")
testutil.Touch(t, b.RootPath, "a.yml")
testutil.Touch(t, b.RootPath, "b.yml")
testutil.Touch(t, b.BundleRootPath, "databricks.yml")
testutil.Touch(t, b.BundleRootPath, "a.yml")
testutil.Touch(t, b.BundleRootPath, "b.yml")
diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
require.NoError(t, diags.Error())
@ -63,7 +63,7 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) {
func TestProcessRootIncludesMultiGlob(t *testing.T) {
b := &bundle.Bundle{
RootPath: t.TempDir(),
BundleRootPath: t.TempDir(),
Config: config.Root{
Include: []string{
"a*.yml",
@ -72,8 +72,8 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) {
},
}
testutil.Touch(t, b.RootPath, "a1.yml")
testutil.Touch(t, b.RootPath, "b1.yml")
testutil.Touch(t, b.BundleRootPath, "a1.yml")
testutil.Touch(t, b.BundleRootPath, "b1.yml")
diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
require.NoError(t, diags.Error())
@ -82,7 +82,7 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) {
func TestProcessRootIncludesRemoveDups(t *testing.T) {
b := &bundle.Bundle{
RootPath: t.TempDir(),
BundleRootPath: t.TempDir(),
Config: config.Root{
Include: []string{
"*.yml",
@ -91,7 +91,7 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) {
},
}
testutil.Touch(t, b.RootPath, "a.yml")
testutil.Touch(t, b.BundleRootPath, "a.yml")
diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
require.NoError(t, diags.Error())
@ -100,7 +100,7 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) {
func TestProcessRootIncludesNotExists(t *testing.T) {
b := &bundle.Bundle{
RootPath: t.TempDir(),
BundleRootPath: t.TempDir(),
Config: config.Root{
Include: []string{
"notexist.yml",

View File

@ -0,0 +1,11 @@
resources:
  pipelines:
    pipeline1:
      name: pipeline1

targets:
  target1:
    resources:
      jobs:
        job1:
          name: job1

View File

@ -0,0 +1,43 @@
resources:
  experiments:
    experiment1:
      name: experiment1

  model_serving_endpoints:
    model_serving_endpoint1:
      name: model_serving_endpoint1

  jobs:
    job1:
      name: job1
    job2:
      name: job2

  models:
    model1:
      name: model1

  pipelines:
    pipeline1:
      name: pipeline1
    pipeline2:
      name: pipeline2

  schemas:
    schema1:
      name: schema1

targets:
  target1:
    resources:
      quality_monitors:
        quality_monitor1:
          baseline_table_name: quality_monitor1

      jobs:
        job3:
          name: job3

      registered_models:
        registered_model1:
          name: registered_model1

View File

@ -0,0 +1,11 @@
resources:
  jobs:
    job1:
      name: job1

targets:
  target1:
    resources:
      jobs:
        job1:
          description: job1

View File

@ -0,0 +1,4 @@
resources:
  pipelines:
    pipeline1:
      name: pipeline1

View File

@ -0,0 +1,7 @@
resources:
  jobs:
    job1:
      name: job1

    job2:
      name: job2

View File

@ -0,0 +1,11 @@
resources:
  pipelines:
    pipeline1:
      name: pipeline1

targets:
  target1:
    resources:
      jobs:
        job1:
          name: job1

View File

@ -0,0 +1,11 @@
resources:
  pipelines:
    pipeline1:
      name: pipeline1

targets:
  target1:
    resources:
      jobs:
        job1:
          name: job1

View File

@ -0,0 +1,43 @@
resources:
  experiments:
    experiment1:
      name: experiment1

  model_serving_endpoints:
    model_serving_endpoint1:
      name: model_serving_endpoint1

  jobs:
    job1:
      name: job1
    job2:
      name: job2

  models:
    model1:
      name: model1

  pipelines:
    pipeline1:
      name: pipeline1
    pipeline2:
      name: pipeline2

  schemas:
    schema1:
      name: schema1

targets:
  target1:
    resources:
      quality_monitors:
        quality_monitor1:
          baseline_table_name: quality_monitor1

      jobs:
        job3:
          name: job3

      registered_models:
        registered_model1:
          name: registered_model1

View File

@ -0,0 +1,11 @@
resources:
  jobs:
    job1:
      name: job1

targets:
  target1:
    resources:
      jobs:
        job2:
          name: job2

View File

@ -0,0 +1,11 @@
resources:
  jobs:
    job1:
      name: job1

targets:
  target1:
    resources:
      jobs:
        job1:
          description: job1

View File

@ -0,0 +1,7 @@
resources:
  jobs:
    job1:
      name: job1

    job2:
      name: job2

View File

@ -0,0 +1,8 @@
targets:
  target1:
    resources:
      jobs:
        job1:
          description: job1
        job2:
          description: job2

View File

@ -35,8 +35,10 @@ func (m *applyPresets) Name() string {
}
func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
var diags diag.Diagnostics
if d := validatePauseStatus(b); d != nil {
return d
diags = diags.Extend(d)
}
r := b.Config.Resources
@ -45,7 +47,11 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
tags := toTagArray(t.Tags)
// Jobs presets: Prefix, Tags, JobsMaxConcurrentRuns, TriggerPauseStatus
for _, j := range r.Jobs {
for key, j := range r.Jobs {
if j.JobSettings == nil {
diags = diags.Extend(diag.Errorf("job %s is not defined", key))
continue
}
j.Name = prefix + j.Name
if j.Tags == nil {
j.Tags = make(map[string]string)
@ -77,20 +83,27 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
}
// Pipelines presets: Prefix, PipelinesDevelopment
for i := range r.Pipelines {
r.Pipelines[i].Name = prefix + r.Pipelines[i].Name
for key, p := range r.Pipelines {
if p.PipelineSpec == nil {
diags = diags.Extend(diag.Errorf("pipeline %s is not defined", key))
continue
}
p.Name = prefix + p.Name
if config.IsExplicitlyEnabled(t.PipelinesDevelopment) {
r.Pipelines[i].Development = true
p.Development = true
}
if t.TriggerPauseStatus == config.Paused {
r.Pipelines[i].Continuous = false
p.Continuous = false
}
// As of 2024-06, pipelines don't yet support tags
}
// Models presets: Prefix, Tags
for _, m := range r.Models {
for key, m := range r.Models {
if m.Model == nil {
diags = diags.Extend(diag.Errorf("model %s is not defined", key))
continue
}
m.Name = prefix + m.Name
for _, t := range tags {
exists := slices.ContainsFunc(m.Tags, func(modelTag ml.ModelTag) bool {
@ -104,7 +117,11 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
}
// Experiments presets: Prefix, Tags
for _, e := range r.Experiments {
for key, e := range r.Experiments {
if e.Experiment == nil {
diags = diags.Extend(diag.Errorf("experiment %s is not defined", key))
continue
}
filepath := e.Name
dir := path.Dir(filepath)
base := path.Base(filepath)
@ -128,39 +145,74 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
}
// Model serving endpoint presets: Prefix
for i := range r.ModelServingEndpoints {
r.ModelServingEndpoints[i].Name = normalizePrefix(prefix) + r.ModelServingEndpoints[i].Name
for key, e := range r.ModelServingEndpoints {
if e.CreateServingEndpoint == nil {
diags = diags.Extend(diag.Errorf("model serving endpoint %s is not defined", key))
continue
}
e.Name = normalizePrefix(prefix) + e.Name
// As of 2024-06, model serving endpoints don't yet support tags
}
// Registered models presets: Prefix
for i := range r.RegisteredModels {
r.RegisteredModels[i].Name = normalizePrefix(prefix) + r.RegisteredModels[i].Name
for key, m := range r.RegisteredModels {
if m.CreateRegisteredModelRequest == nil {
diags = diags.Extend(diag.Errorf("registered model %s is not defined", key))
continue
}
m.Name = normalizePrefix(prefix) + m.Name
// As of 2024-06, registered models don't yet support tags
}
// Quality monitors presets: Prefix
// Quality monitors presets: Schedule
if t.TriggerPauseStatus == config.Paused {
for i := range r.QualityMonitors {
for key, q := range r.QualityMonitors {
if q.CreateMonitor == nil {
diags = diags.Extend(diag.Errorf("quality monitor %s is not defined", key))
continue
}
// Remove all schedules from monitors, since they don't support pausing/unpausing.
// Quality monitors might support the "pause" property in the future, so at the
// CLI level we do respect that property if it is set to "unpaused."
if r.QualityMonitors[i].Schedule != nil && r.QualityMonitors[i].Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused {
r.QualityMonitors[i].Schedule = nil
if q.Schedule != nil && q.Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused {
q.Schedule = nil
}
}
}
// Schemas: Prefix
for i := range r.Schemas {
r.Schemas[i].Name = normalizePrefix(prefix) + r.Schemas[i].Name
for key, s := range r.Schemas {
if s.CreateSchema == nil {
diags = diags.Extend(diag.Errorf("schema %s is not defined", key))
continue
}
s.Name = normalizePrefix(prefix) + s.Name
// HTTP API for schemas doesn't yet support tags. It's only supported in
// the Databricks UI and via the SQL API.
}
return nil
// Clusters: Prefix, Tags
for key, c := range r.Clusters {
if c.ClusterSpec == nil {
diags = diags.Extend(diag.Errorf("cluster %s is not defined", key))
continue
}
c.ClusterName = prefix + c.ClusterName
if c.CustomTags == nil {
c.CustomTags = make(map[string]string)
}
for _, tag := range tags {
normalisedKey := b.Tagging.NormalizeKey(tag.Key)
normalisedValue := b.Tagging.NormalizeValue(tag.Value)
if _, ok := c.CustomTags[normalisedKey]; !ok {
c.CustomTags[normalisedKey] = normalisedValue
}
}
}
return diags
}
func validatePauseStatus(b *bundle.Bundle) diag.Diagnostics {

View File

@ -251,3 +251,116 @@ func TestApplyPresetsJobsMaxConcurrentRuns(t *testing.T) {
})
}
}
func TestApplyPresetsPrefixWithoutJobSettings(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job1": {}, // no jobsettings inside
},
},
Presets: config.Presets{
NamePrefix: "prefix-",
},
},
}
ctx := context.Background()
diags := bundle.Apply(ctx, b, mutator.ApplyPresets())
require.ErrorContains(t, diags.Error(), "job job1 is not defined")
}
func TestApplyPresetsResourceNotDefined(t *testing.T) {
tests := []struct {
resources config.Resources
error string
}{
{
resources: config.Resources{
Jobs: map[string]*resources.Job{
"job1": {}, // no jobsettings inside
},
},
error: "job job1 is not defined",
},
{
resources: config.Resources{
Pipelines: map[string]*resources.Pipeline{
"pipeline1": {}, // no pipelinespec inside
},
},
error: "pipeline pipeline1 is not defined",
},
{
resources: config.Resources{
Models: map[string]*resources.MlflowModel{
"model1": {}, // no model inside
},
},
error: "model model1 is not defined",
},
{
resources: config.Resources{
Experiments: map[string]*resources.MlflowExperiment{
"experiment1": {}, // no experiment inside
},
},
error: "experiment experiment1 is not defined",
},
{
resources: config.Resources{
ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{
"endpoint1": {}, // no CreateServingEndpoint inside
},
RegisteredModels: map[string]*resources.RegisteredModel{
"model1": {}, // no CreateRegisteredModelRequest inside
},
},
error: "model serving endpoint endpoint1 is not defined",
},
{
resources: config.Resources{
QualityMonitors: map[string]*resources.QualityMonitor{
"monitor1": {}, // no CreateMonitor inside
},
},
error: "quality monitor monitor1 is not defined",
},
{
resources: config.Resources{
Schemas: map[string]*resources.Schema{
"schema1": {}, // no CreateSchema inside
},
},
error: "schema schema1 is not defined",
},
{
resources: config.Resources{
Clusters: map[string]*resources.Cluster{
"cluster1": {}, // no ClusterSpec inside
},
},
error: "cluster cluster1 is not defined",
},
}
for _, tt := range tests {
t.Run(tt.error, func(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Resources: tt.resources,
Presets: config.Presets{
TriggerPauseStatus: config.Paused,
},
},
}
ctx := context.Background()
diags := bundle.Apply(ctx, b, mutator.ApplyPresets())
require.ErrorContains(t, diags.Error(), tt.error)
})
}
}

View File

@ -0,0 +1,87 @@
package mutator
import (
"context"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
)
type computeIdToClusterId struct{}
func ComputeIdToClusterId() bundle.Mutator {
return &computeIdToClusterId{}
}
func (m *computeIdToClusterId) Name() string {
return "ComputeIdToClusterId"
}
func (m *computeIdToClusterId) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
var diags diag.Diagnostics
// The "compute_id" key is set; rewrite it to "cluster_id".
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
v, d := rewriteComputeIdToClusterId(v, dyn.NewPath(dyn.Key("bundle")))
diags = diags.Extend(d)
// Check if the "compute_id" key is set in any target overrides.
return dyn.MapByPattern(v, dyn.NewPattern(dyn.Key("targets"), dyn.AnyKey()), func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
v, d := rewriteComputeIdToClusterId(v, dyn.Path{})
diags = diags.Extend(d)
return v, nil
})
})
diags = diags.Extend(diag.FromErr(err))
return diags
}
func rewriteComputeIdToClusterId(v dyn.Value, p dyn.Path) (dyn.Value, diag.Diagnostics) {
var diags diag.Diagnostics
computeIdPath := p.Append(dyn.Key("compute_id"))
computeId, err := dyn.GetByPath(v, computeIdPath)
// If the "compute_id" key is not set, we don't need to do anything.
if err != nil {
return v, nil
}
if computeId.Kind() == dyn.KindInvalid {
return v, nil
}
diags = diags.Append(diag.Diagnostic{
Severity: diag.Warning,
Summary: "compute_id is deprecated, please use cluster_id instead",
Locations: computeId.Locations(),
Paths: []dyn.Path{computeIdPath},
})
clusterIdPath := p.Append(dyn.Key("cluster_id"))
nv, err := dyn.SetByPath(v, clusterIdPath, computeId)
if err != nil {
return dyn.InvalidValue, diag.FromErr(err)
}
// Drop the "compute_id" key.
vout, err := dyn.Walk(nv, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
switch len(p) {
case 0:
return v, nil
case 1:
if p[0] == dyn.Key("compute_id") {
return v, dyn.ErrDrop
}
return v, nil
case 2:
if p[1] == dyn.Key("compute_id") {
return v, dyn.ErrDrop
}
}
return v, dyn.ErrSkip
})
diags = diags.Extend(diag.FromErr(err))
return vout, diags
}

View File

@ -0,0 +1,57 @@
package mutator_test
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/databricks/cli/libs/diag"
"github.com/stretchr/testify/assert"
)
func TestComputeIdToClusterId(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Bundle: config.Bundle{
ComputeId: "compute-id",
},
},
}
diags := bundle.Apply(context.Background(), b, mutator.ComputeIdToClusterId())
assert.NoError(t, diags.Error())
assert.Equal(t, "compute-id", b.Config.Bundle.ClusterId)
assert.Empty(t, b.Config.Bundle.ComputeId)
assert.Len(t, diags, 1)
assert.Equal(t, "compute_id is deprecated, please use cluster_id instead", diags[0].Summary)
assert.Equal(t, diag.Warning, diags[0].Severity)
}
func TestComputeIdToClusterIdInTargetOverride(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Targets: map[string]*config.Target{
"dev": {
ComputeId: "compute-id-dev",
},
},
},
}
diags := bundle.Apply(context.Background(), b, mutator.ComputeIdToClusterId())
assert.NoError(t, diags.Error())
assert.Empty(t, b.Config.Targets["dev"].ComputeId)
diags = diags.Extend(bundle.Apply(context.Background(), b, mutator.SelectTarget("dev")))
assert.NoError(t, diags.Error())
assert.Equal(t, "compute-id-dev", b.Config.Bundle.ClusterId)
assert.Empty(t, b.Config.Bundle.ComputeId)
assert.Len(t, diags, 1)
assert.Equal(t, "compute_id is deprecated, please use cluster_id instead", diags[0].Summary)
assert.Equal(t, diag.Warning, diags[0].Severity)
}

View File

@ -29,6 +29,10 @@ func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundl
b.Config.Workspace.FilePath = path.Join(root, "files")
}
if b.Config.Workspace.ResourcePath == "" {
b.Config.Workspace.ResourcePath = path.Join(root, "resources")
}
if b.Config.Workspace.ArtifactPath == "" {
b.Config.Workspace.ArtifactPath = path.Join(root, "artifacts")
}

View File

@ -22,6 +22,7 @@ func TestDefineDefaultWorkspacePaths(t *testing.T) {
diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths())
require.NoError(t, diags.Error())
assert.Equal(t, "/files", b.Config.Workspace.FilePath)
assert.Equal(t, "/resources", b.Config.Workspace.ResourcePath)
assert.Equal(t, "/artifacts", b.Config.Workspace.ArtifactPath)
assert.Equal(t, "/state", b.Config.Workspace.StatePath)
}
@ -32,6 +33,7 @@ func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) {
Workspace: config.Workspace{
RootPath: "/",
FilePath: "/foo/bar",
ResourcePath: "/foo/bar",
ArtifactPath: "/foo/bar",
StatePath: "/foo/bar",
},
@ -40,6 +42,7 @@ func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) {
diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths())
require.NoError(t, diags.Error())
assert.Equal(t, "/foo/bar", b.Config.Workspace.FilePath)
assert.Equal(t, "/foo/bar", b.Config.Workspace.ResourcePath)
assert.Equal(t, "/foo/bar", b.Config.Workspace.ArtifactPath)
assert.Equal(t, "/foo/bar", b.Config.Workspace.StatePath)
}

View File

@ -10,6 +10,7 @@ import (
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/bundle/internal/bundletest"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/pipelines"
"github.com/stretchr/testify/require"
@ -41,7 +42,7 @@ func TestExpandGlobPathsInPipelines(t *testing.T) {
touchEmptyFile(t, filepath.Join(dir, "skip/test7.py"))
b := &bundle.Bundle{
RootPath: dir,
BundleRootPath: dir,
Config: config.Root{
Resources: config.Resources{
Pipelines: map[string]*resources.Pipeline{
@ -105,8 +106,8 @@ func TestExpandGlobPathsInPipelines(t *testing.T) {
},
}
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
bundletest.SetLocation(b, "resources.pipelines.pipeline.libraries[3]", filepath.Join(dir, "relative", "resource.yml"))
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})
bundletest.SetLocation(b, "resources.pipelines.pipeline.libraries[3]", []dyn.Location{{File: filepath.Join(dir, "relative", "resource.yml")}})
m := ExpandPipelineGlobPaths()
diags := bundle.Apply(context.Background(), b, m)

View File

@ -33,7 +33,7 @@ func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.
}
if strings.HasPrefix(root, "~/") {
home := fmt.Sprintf("/Users/%s", currentUser.UserName)
home := fmt.Sprintf("/Workspace/Users/%s", currentUser.UserName)
b.Config.Workspace.RootPath = path.Join(home, root[2:])
}

View File

@ -27,7 +27,7 @@ func TestExpandWorkspaceRoot(t *testing.T) {
}
diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
require.NoError(t, diags.Error())
assert.Equal(t, "/Users/jane@doe.com/foo", b.Config.Workspace.RootPath)
assert.Equal(t, "/Workspace/Users/jane@doe.com/foo", b.Config.Workspace.RootPath)
}
func TestExpandWorkspaceRootDoesNothing(t *testing.T) {

View File

@ -56,7 +56,7 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn
}
// Compute relative path of the bundle root from the Git repo root.
absBundlePath, err := filepath.Abs(b.RootPath)
absBundlePath, err := filepath.Abs(b.BundleRootPath)
if err != nil {
return diag.FromErr(err)
}

View File

@ -23,6 +23,7 @@ func DefaultMutators() []bundle.Mutator {
VerifyCliVersion(),
EnvironmentsToTargets(),
ComputeIdToClusterId(),
InitializeVariables(),
DefineDefaultTarget(),
LoadGitDetails(),

View File

@ -39,22 +39,22 @@ func overrideJobCompute(j *resources.Job, compute string) {
func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
if b.Config.Bundle.Mode != config.Development {
if b.Config.Bundle.ComputeID != "" {
if b.Config.Bundle.ClusterId != "" {
return diag.Errorf("cannot override compute for an target that does not use 'mode: development'")
}
return nil
}
if v := env.Get(ctx, "DATABRICKS_CLUSTER_ID"); v != "" {
b.Config.Bundle.ComputeID = v
b.Config.Bundle.ClusterId = v
}
if b.Config.Bundle.ComputeID == "" {
if b.Config.Bundle.ClusterId == "" {
return nil
}
r := b.Config.Resources
for i := range r.Jobs {
overrideJobCompute(r.Jobs[i], b.Config.Bundle.ComputeID)
overrideJobCompute(r.Jobs[i], b.Config.Bundle.ClusterId)
}
return nil

View File

@ -20,7 +20,7 @@ func TestOverrideDevelopment(t *testing.T) {
Config: config.Root{
Bundle: config.Bundle{
Mode: config.Development,
ComputeID: "newClusterID",
ClusterId: "newClusterID",
},
Resources: config.Resources{
Jobs: map[string]*resources.Job{
@ -144,7 +144,7 @@ func TestOverrideProduction(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Bundle: config.Bundle{
ComputeID: "newClusterID",
ClusterId: "newClusterID",
},
Resources: config.Resources{
Jobs: map[string]*resources.Job{

View File

@ -0,0 +1,115 @@
package paths
import (
"github.com/databricks/cli/bundle/libraries"
"github.com/databricks/cli/libs/dyn"
)
type jobRewritePattern struct {
pattern dyn.Pattern
kind PathKind
skipRewrite func(string) bool
}
func noSkipRewrite(string) bool {
return false
}
func jobTaskRewritePatterns(base dyn.Pattern) []jobRewritePattern {
return []jobRewritePattern{
{
base.Append(dyn.Key("notebook_task"), dyn.Key("notebook_path")),
PathKindNotebook,
noSkipRewrite,
},
{
base.Append(dyn.Key("spark_python_task"), dyn.Key("python_file")),
PathKindWorkspaceFile,
noSkipRewrite,
},
{
base.Append(dyn.Key("dbt_task"), dyn.Key("project_directory")),
PathKindDirectory,
noSkipRewrite,
},
{
base.Append(dyn.Key("sql_task"), dyn.Key("file"), dyn.Key("path")),
PathKindWorkspaceFile,
noSkipRewrite,
},
{
base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("whl")),
PathKindLibrary,
noSkipRewrite,
},
{
base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("jar")),
PathKindLibrary,
noSkipRewrite,
},
{
base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("requirements")),
PathKindWorkspaceFile,
noSkipRewrite,
},
}
}
func jobRewritePatterns() []jobRewritePattern {
// Base pattern to match all tasks in all jobs.
base := dyn.NewPattern(
dyn.Key("resources"),
dyn.Key("jobs"),
dyn.AnyKey(),
dyn.Key("tasks"),
dyn.AnyIndex(),
)
// Compile the list of patterns and their respective path kinds.
jobEnvironmentsPatterns := []jobRewritePattern{
{
dyn.NewPattern(
dyn.Key("resources"),
dyn.Key("jobs"),
dyn.AnyKey(),
dyn.Key("environments"),
dyn.AnyIndex(),
dyn.Key("spec"),
dyn.Key("dependencies"),
dyn.AnyIndex(),
),
PathKindWithPrefix,
func(s string) bool {
return !libraries.IsLibraryLocal(s)
},
},
}
taskPatterns := jobTaskRewritePatterns(base)
forEachPatterns := jobTaskRewritePatterns(base.Append(dyn.Key("for_each_task"), dyn.Key("task")))
allPatterns := append(taskPatterns, jobEnvironmentsPatterns...)
allPatterns = append(allPatterns, forEachPatterns...)
return allPatterns
}
// VisitJobPaths visits all paths in job resources and applies a function to each path.
func VisitJobPaths(value dyn.Value, fn VisitFunc) (dyn.Value, error) {
var err error
var newValue = value
for _, rewritePattern := range jobRewritePatterns() {
newValue, err = dyn.MapByPattern(newValue, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
if rewritePattern.skipRewrite(v.MustString()) {
return v, nil
}
return fn(p, rewritePattern.kind, v)
})
if err != nil {
return dyn.InvalidValue, err
}
}
return newValue, nil
}

View File

@ -0,0 +1,168 @@
package paths
import (
"testing"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/libs/dyn"
assert "github.com/databricks/cli/libs/dyn/dynassert"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/require"
)
func TestVisitJobPaths(t *testing.T) {
task0 := jobs.Task{
NotebookTask: &jobs.NotebookTask{
NotebookPath: "abc",
},
}
task1 := jobs.Task{
SparkPythonTask: &jobs.SparkPythonTask{
PythonFile: "abc",
},
}
task2 := jobs.Task{
DbtTask: &jobs.DbtTask{
ProjectDirectory: "abc",
},
}
task3 := jobs.Task{
SqlTask: &jobs.SqlTask{
File: &jobs.SqlTaskFile{
Path: "abc",
},
},
}
task4 := jobs.Task{
Libraries: []compute.Library{
{Whl: "dist/foo.whl"},
},
}
task5 := jobs.Task{
Libraries: []compute.Library{
{Jar: "dist/foo.jar"},
},
}
task6 := jobs.Task{
Libraries: []compute.Library{
{Requirements: "requirements.txt"},
},
}
job0 := &resources.Job{
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{
task0,
task1,
task2,
task3,
task4,
task5,
task6,
},
},
}
root := config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job0": job0,
},
},
}
actual := visitJobPaths(t, root)
expected := []dyn.Path{
dyn.MustPathFromString("resources.jobs.job0.tasks[0].notebook_task.notebook_path"),
dyn.MustPathFromString("resources.jobs.job0.tasks[1].spark_python_task.python_file"),
dyn.MustPathFromString("resources.jobs.job0.tasks[2].dbt_task.project_directory"),
dyn.MustPathFromString("resources.jobs.job0.tasks[3].sql_task.file.path"),
dyn.MustPathFromString("resources.jobs.job0.tasks[4].libraries[0].whl"),
dyn.MustPathFromString("resources.jobs.job0.tasks[5].libraries[0].jar"),
dyn.MustPathFromString("resources.jobs.job0.tasks[6].libraries[0].requirements"),
}
assert.ElementsMatch(t, expected, actual)
}
func TestVisitJobPaths_environments(t *testing.T) {
environment0 := jobs.JobEnvironment{
Spec: &compute.Environment{
Dependencies: []string{
"dist_0/*.whl",
"dist_1/*.whl",
},
},
}
job0 := &resources.Job{
JobSettings: &jobs.JobSettings{
Environments: []jobs.JobEnvironment{
environment0,
},
},
}
root := config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job0": job0,
},
},
}
actual := visitJobPaths(t, root)
expected := []dyn.Path{
dyn.MustPathFromString("resources.jobs.job0.environments[0].spec.dependencies[0]"),
dyn.MustPathFromString("resources.jobs.job0.environments[0].spec.dependencies[1]"),
}
assert.ElementsMatch(t, expected, actual)
}
func TestVisitJobPaths_foreach(t *testing.T) {
task0 := jobs.Task{
ForEachTask: &jobs.ForEachTask{
Task: jobs.Task{
NotebookTask: &jobs.NotebookTask{
NotebookPath: "abc",
},
},
},
}
job0 := &resources.Job{
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{
task0,
},
},
}
root := config.Root{
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"job0": job0,
},
},
}
actual := visitJobPaths(t, root)
expected := []dyn.Path{
dyn.MustPathFromString("resources.jobs.job0.tasks[0].for_each_task.task.notebook_task.notebook_path"),
}
assert.ElementsMatch(t, expected, actual)
}
func visitJobPaths(t *testing.T, root config.Root) []dyn.Path {
var actual []dyn.Path
err := root.Mutate(func(value dyn.Value) (dyn.Value, error) {
return VisitJobPaths(value, func(p dyn.Path, kind PathKind, v dyn.Value) (dyn.Value, error) {
actual = append(actual, p)
return v, nil
})
})
require.NoError(t, err)
return actual
}

View File

@ -0,0 +1,26 @@
package paths
import "github.com/databricks/cli/libs/dyn"
type PathKind int
const (
// PathKindLibrary is a path to a library file
PathKindLibrary = iota
// PathKindNotebook is a path to a notebook file
PathKindNotebook
// PathKindWorkspaceFile is a path to a regular workspace file,
// notebooks are not allowed because they are uploaded as a special
// kind of workspace object.
PathKindWorkspaceFile
// PathKindWithPrefix is a path that starts with './'
PathKindWithPrefix
// PathKindDirectory is a path to directory
// PathKindDirectory is a path to a directory
)
type VisitFunc func(path dyn.Path, kind PathKind, value dyn.Value) (dyn.Value, error)
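
Together with VisitJobPaths above, a sketch of how a caller might dispatch on these kinds. The rewrite rule and the hand-built input value are illustrative, not the CLI's actual translation logic, and it assumes dyn.NewValue accepts nested maps and slices of dyn.Value as it does for the map values in the tests above:

package main

import (
	"fmt"
	"strings"

	"github.com/databricks/cli/bundle/config/mutator/paths"
	"github.com/databricks/cli/libs/dyn"
)

func main() {
	// A value equivalent to a bundle with one job holding one notebook task.
	root := dyn.NewValue(map[string]dyn.Value{
		"resources": dyn.NewValue(map[string]dyn.Value{
			"jobs": dyn.NewValue(map[string]dyn.Value{
				"job0": dyn.NewValue(map[string]dyn.Value{
					"tasks": dyn.NewValue([]dyn.Value{
						dyn.NewValue(map[string]dyn.Value{
							"notebook_task": dyn.NewValue(map[string]dyn.Value{
								"notebook_path": dyn.NewValue("notebooks/main", nil),
							}, nil),
						}, nil),
					}, nil),
				}, nil),
			}, nil),
		}, nil),
	}, nil)

	// Dispatch on PathKind: make bare notebook paths explicitly relative
	// and leave every other kind untouched.
	out, err := paths.VisitJobPaths(root, func(p dyn.Path, kind paths.PathKind, v dyn.Value) (dyn.Value, error) {
		if kind != paths.PathKindNotebook {
			return v, nil
		}
		s := v.MustString()
		if strings.HasPrefix(s, "/") || strings.HasPrefix(s, "./") {
			return v, nil
		}
		return dyn.NewValue("./"+s, v.Locations()), nil
	})
	if err != nil {
		panic(err)
	}

	nb, _ := dyn.GetByPath(out, dyn.MustPathFromString("resources.jobs.job0.tasks[0].notebook_task.notebook_path"))
	fmt.Println(nb.MustString()) // ./notebooks/main
}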

View File

@ -5,8 +5,8 @@ import (
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/libs/auth"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/iamutil"
"github.com/databricks/cli/libs/tags"
)
@ -33,7 +33,7 @@ func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) diag.
}
b.Config.Workspace.CurrentUser = &config.User{
ShortName: auth.GetShortUserName(me.UserName),
ShortName: iamutil.GetShortUserName(me),
User: me,
}

View File

@ -0,0 +1,67 @@
package mutator
import (
"context"
"fmt"
"strings"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
)
type prependWorkspacePrefix struct{}
// PrependWorkspacePrefix prepends the /Workspace prefix to the workspace paths in the bundle configuration.
func PrependWorkspacePrefix() bundle.Mutator {
return &prependWorkspacePrefix{}
}
func (m *prependWorkspacePrefix) Name() string {
return "PrependWorkspacePrefix"
}
var skipPrefixes = []string{
"/Workspace/",
"/Volumes/",
}
func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
patterns := []dyn.Pattern{
dyn.NewPattern(dyn.Key("workspace"), dyn.Key("root_path")),
dyn.NewPattern(dyn.Key("workspace"), dyn.Key("file_path")),
dyn.NewPattern(dyn.Key("workspace"), dyn.Key("artifact_path")),
dyn.NewPattern(dyn.Key("workspace"), dyn.Key("state_path")),
}
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
var err error
for _, pattern := range patterns {
v, err = dyn.MapByPattern(v, pattern, func(p dyn.Path, pv dyn.Value) (dyn.Value, error) {
path, ok := pv.AsString()
if !ok {
return dyn.InvalidValue, fmt.Errorf("expected string, got %s", pv.Kind())
}
for _, prefix := range skipPrefixes {
if strings.HasPrefix(path, prefix) {
return pv, nil
}
}
return dyn.NewValue(fmt.Sprintf("/Workspace%s", path), pv.Locations()), nil
})
if err != nil {
return dyn.InvalidValue, err
}
}
return v, nil
})
if err != nil {
return diag.FromErr(err)
}
return nil
}
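
The walk above boils down to one dyn.MapByPattern call per workspace path. A self-contained sketch of the same idea in miniature (the input value is illustrative):

package main

import (
	"fmt"
	"strings"

	"github.com/databricks/cli/libs/dyn"
)

func main() {
	// A value equivalent to `workspace: {root_path: /Users/jane@doe.com}`.
	v := dyn.NewValue(map[string]dyn.Value{
		"workspace": dyn.NewValue(map[string]dyn.Value{
			"root_path": dyn.NewValue("/Users/jane@doe.com", nil),
		}, nil),
	}, nil)

	pattern := dyn.NewPattern(dyn.Key("workspace"), dyn.Key("root_path"))
	out, err := dyn.MapByPattern(v, pattern, func(p dyn.Path, pv dyn.Value) (dyn.Value, error) {
		s, ok := pv.AsString()
		if !ok {
			return pv, nil
		}
		// Skip values that already carry a known prefix.
		for _, prefix := range []string{"/Workspace/", "/Volumes/"} {
			if strings.HasPrefix(s, prefix) {
				return pv, nil
			}
		}
		return dyn.NewValue("/Workspace"+s, pv.Locations()), nil
	})
	if err != nil {
		panic(err)
	}

	rp, _ := dyn.GetByPath(out, dyn.MustPathFromString("workspace.root_path"))
	fmt.Println(rp.MustString()) // /Workspace/Users/jane@doe.com
}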

View File

@ -0,0 +1,79 @@
package mutator
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/databricks-sdk-go/service/iam"
"github.com/stretchr/testify/require"
)
func TestPrependWorkspacePrefix(t *testing.T) {
testCases := []struct {
path string
expected string
}{
{
path: "/Users/test",
expected: "/Workspace/Users/test",
},
{
path: "/Shared/test",
expected: "/Workspace/Shared/test",
},
{
path: "/Workspace/Users/test",
expected: "/Workspace/Users/test",
},
{
path: "/Volumes/Users/test",
expected: "/Volumes/Users/test",
},
}
for _, tc := range testCases {
b := &bundle.Bundle{
Config: config.Root{
Workspace: config.Workspace{
RootPath: tc.path,
ArtifactPath: tc.path,
FilePath: tc.path,
StatePath: tc.path,
},
},
}
diags := bundle.Apply(context.Background(), b, PrependWorkspacePrefix())
require.Empty(t, diags)
require.Equal(t, tc.expected, b.Config.Workspace.RootPath)
require.Equal(t, tc.expected, b.Config.Workspace.ArtifactPath)
require.Equal(t, tc.expected, b.Config.Workspace.FilePath)
require.Equal(t, tc.expected, b.Config.Workspace.StatePath)
}
}
func TestPrependWorkspaceForDefaultConfig(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Bundle: config.Bundle{
Name: "test",
Target: "dev",
},
Workspace: config.Workspace{
CurrentUser: &config.User{
User: &iam.User{
UserName: "jane@doe.com",
},
},
},
},
}
diags := bundle.Apply(context.Background(), b, bundle.Seq(DefineDefaultWorkspaceRoot(), ExpandWorkspaceRoot(), DefineDefaultWorkspacePaths(), PrependWorkspacePrefix()))
require.Empty(t, diags)
require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev", b.Config.Workspace.RootPath)
require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/artifacts", b.Config.Workspace.ArtifactPath)
require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/files", b.Config.Workspace.FilePath)
require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/state", b.Config.Workspace.StatePath)
}

View File

@ -7,9 +7,9 @@ import (
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/libs/auth"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/cli/libs/iamutil"
"github.com/databricks/cli/libs/log"
)
@ -65,6 +65,7 @@ func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) {
}
func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
var diags diag.Diagnostics
p := b.Config.Presets
u := b.Config.Workspace.CurrentUser
@ -75,46 +76,61 @@ func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
// status to UNPAUSED at the level of an individual object, which was
// historically allowed.)
if p.TriggerPauseStatus == config.Unpaused {
return diag.Diagnostics{{
diags = diags.Append(diag.Diagnostic{
Severity: diag.Error,
Summary: "target with 'mode: development' cannot set trigger pause status to UNPAUSED by default",
Locations: []dyn.Location{b.Config.GetLocation("presets.trigger_pause_status")},
}}
})
}
// Make sure this development copy has unique names and paths to avoid conflicts
if path := findNonUserPath(b); path != "" {
return diag.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path)
if path == "artifact_path" && strings.HasPrefix(b.Config.Workspace.ArtifactPath, "/Volumes") {
// For Volumes paths we recommend including the current username as a substring
diags = diags.Extend(diag.Errorf("%s should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'", path))
} else {
// For non-Volumes paths recommend simply putting things in the home folder
diags = diags.Extend(diag.Errorf("%s must start with '~/' or contain the current username to ensure uniqueness when using 'mode: development'", path))
}
}
if p.NamePrefix != "" && !strings.Contains(p.NamePrefix, u.ShortName) && !strings.Contains(p.NamePrefix, u.UserName) {
// Resources such as pipelines require a unique name, e.g. '[dev steve] my_pipeline'.
// For this reason we require the name prefix to contain the current username;
// it's a pitfall for users if they don't include it and later find out that
// only a single user can do development deployments.
return diag.Diagnostics{{
diags = diags.Append(diag.Diagnostic{
Severity: diag.Error,
Summary: "prefix should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'",
Locations: []dyn.Location{b.Config.GetLocation("presets.name_prefix")},
}}
})
}
return nil
return diags
}
// findNonUserPath finds the first workspace path such as root_path that doesn't
// contain the current username or current user's shortname.
func findNonUserPath(b *bundle.Bundle) string {
containsName := func(path string) bool {
username := b.Config.Workspace.CurrentUser.UserName
shortname := b.Config.Workspace.CurrentUser.ShortName
return strings.Contains(path, username) || strings.Contains(path, shortname)
}
if b.Config.Workspace.RootPath != "" && !strings.Contains(b.Config.Workspace.RootPath, username) {
if b.Config.Workspace.RootPath != "" && !containsName(b.Config.Workspace.RootPath) {
return "root_path"
}
if b.Config.Workspace.StatePath != "" && !strings.Contains(b.Config.Workspace.StatePath, username) {
return "state_path"
}
if b.Config.Workspace.FilePath != "" && !strings.Contains(b.Config.Workspace.FilePath, username) {
if b.Config.Workspace.FilePath != "" && !containsName(b.Config.Workspace.FilePath) {
return "file_path"
}
if b.Config.Workspace.ArtifactPath != "" && !strings.Contains(b.Config.Workspace.ArtifactPath, username) {
if b.Config.Workspace.ResourcePath != "" && !containsName(b.Config.Workspace.ResourcePath) {
return "resource_path"
}
if b.Config.Workspace.ArtifactPath != "" && !containsName(b.Config.Workspace.ArtifactPath) {
return "artifact_path"
}
if b.Config.Workspace.StatePath != "" && !containsName(b.Config.Workspace.StatePath) {
return "state_path"
}
return ""
}
@ -183,7 +199,7 @@ func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) diag.Di
transformDevelopmentMode(ctx, b)
return diags
case config.Production:
isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.UserName)
isPrincipal := iamutil.IsServicePrincipal(b.Config.Workspace.CurrentUser.User)
return validateProductionMode(ctx, b, isPrincipal)
case "":
// No action

View File

@ -13,6 +13,7 @@ import (
"github.com/databricks/cli/libs/tags"
sdkconfig "github.com/databricks/databricks-sdk-go/config"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/iam"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/ml"
@ -119,6 +120,9 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
Schemas: map[string]*resources.Schema{
"schema1": {CreateSchema: &catalog.CreateSchema{Name: "schema1"}},
},
Clusters: map[string]*resources.Cluster{
"cluster1": {ClusterSpec: &compute.ClusterSpec{ClusterName: "cluster1", SparkVersion: "13.2.x", NumWorkers: 1}},
},
},
},
// Use AWS implementation for testing.
@ -177,6 +181,9 @@ func TestProcessTargetModeDevelopment(t *testing.T) {
// Schema 1
assert.Equal(t, "dev_lennart_schema1", b.Config.Resources.Schemas["schema1"].Name)
// Clusters
assert.Equal(t, "[dev lennart] cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)
}
func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) {
@ -230,10 +237,20 @@ func TestValidateDevelopmentMode(t *testing.T) {
diags := validateDevelopmentMode(b)
require.NoError(t, diags.Error())
// Test with /Volumes path
b = mockBundle(config.Development)
b.Config.Workspace.ArtifactPath = "/Volumes/catalog/schema/lennart/libs"
diags = validateDevelopmentMode(b)
require.NoError(t, diags.Error())
b.Config.Workspace.ArtifactPath = "/Volumes/catalog/schema/libs"
diags = validateDevelopmentMode(b)
require.ErrorContains(t, diags.Error(), "artifact_path should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'")
// Test with a bundle that has a non-user path
b = mockBundle(config.Development)
b.Config.Workspace.RootPath = "/Shared/.bundle/x/y/state"
diags = validateDevelopmentMode(b)
require.ErrorContains(t, diags.Error(), "root_path")
require.ErrorContains(t, diags.Error(), "root_path must start with '~/' or contain the current username to ensure uniqueness when using 'mode: development'")
// Test with a bundle that has an unpaused trigger pause status
b = mockBundle(config.Development)
@ -271,6 +288,7 @@ func TestProcessTargetModeDefault(t *testing.T) {
assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
assert.Equal(t, "cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)
}
func TestProcessTargetModeProduction(t *testing.T) {
@ -302,6 +320,7 @@ func TestProcessTargetModeProduction(t *testing.T) {
b.Config.Resources.Experiments["experiment2"].Permissions = permissions
b.Config.Resources.Models["model1"].Permissions = permissions
b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Permissions = permissions
b.Config.Resources.Clusters["cluster1"].Permissions = permissions
diags = validateProductionMode(context.Background(), b, false)
require.NoError(t, diags.Error())
@ -312,6 +331,7 @@ func TestProcessTargetModeProduction(t *testing.T) {
assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
assert.Equal(t, "cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)
}
func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) {

View File

@ -1,15 +1,21 @@
package python
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"github.com/databricks/cli/libs/python"
"github.com/databricks/databricks-sdk-go/logger"
"github.com/fatih/color"
"strings"
"github.com/databricks/cli/libs/python"
"github.com/databricks/cli/bundle/env"
@ -102,7 +108,7 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
return dyn.InvalidValue, fmt.Errorf("failed to create cache dir: %w", err)
}
rightRoot, diags := m.runPythonMutator(ctx, cacheDir, b.RootPath, pythonPath, leftRoot)
rightRoot, diags := m.runPythonMutator(ctx, cacheDir, b.BundleRootPath, pythonPath, leftRoot)
mutateDiags = diags
if diags.HasError() {
return dyn.InvalidValue, mutateDiagsHasError
@ -169,7 +175,11 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r
return dyn.InvalidValue, diag.Errorf("failed to write input file: %s", err)
}
stderrWriter := newLogWriter(ctx, "stderr: ")
stderrBuf := bytes.Buffer{}
stderrWriter := io.MultiWriter(
newLogWriter(ctx, "stderr: "),
&stderrBuf,
)
stdoutWriter := newLogWriter(ctx, "stdout: ")
_, processErr := process.Background(
@ -197,7 +207,13 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r
// the process can fail without reporting errors in the diagnostics file or creating it,
// for instance, when the venv doesn't have the PyDABs library installed
if processErr != nil {
return dyn.InvalidValue, diag.Errorf("python mutator process failed: %sw, use --debug to enable logging", processErr)
diagnostic := diag.Diagnostic{
Severity: diag.Error,
Summary: fmt.Sprintf("python mutator process failed: %q, use --debug to enable logging", processErr),
Detail: explainProcessErr(stderrBuf.String()),
}
return dyn.InvalidValue, diag.Diagnostics{diagnostic}
}
// or we can fail to read the diagnostics file, which should always be created
@ -205,15 +221,40 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r
return dyn.InvalidValue, diag.Errorf("failed to load diagnostics: %s", pythonDiagnosticsErr)
}
output, err := loadOutputFile(rootPath, outputPath)
if err != nil {
return dyn.InvalidValue, diag.Errorf("failed to load Python mutator output: %s", err)
}
output, outputDiags := loadOutputFile(rootPath, outputPath)
pythonDiagnostics = pythonDiagnostics.Extend(outputDiags)
// we pass through pythonDiagnostics because it contains warnings
return output, pythonDiagnostics
}
const installExplanation = `If using Python wheels, ensure that 'databricks-pydabs' is included in the dependencies,
and that the wheel is installed in the Python environment:
$ .venv/bin/pip install -e .
If using a virtual environment, ensure it is specified as the venv_path property in databricks.yml,
or activate the environment before running CLI commands:
experimental:
pydabs:
venv_path: .venv
`
// explainProcessErr provides additional explanation for common errors.
// It's meant to be best effort, and not all errors are covered.
// Output should only be used for error reporting.
func explainProcessErr(stderr string) string {
// implemented in cpython/Lib/runpy.py and portable across Python 3.x, including pypy
if strings.Contains(stderr, "Error while finding module specification for 'databricks.bundles.build'") {
summary := color.CyanString("Explanation: ") + "'databricks-pydabs' library is not installed in the Python environment.\n"
return stderr + "\n" + summary + "\n" + installExplanation
}
return stderr
}
func writeInputFile(inputPath string, input dyn.Value) error {
// we need to marshal dyn.Value instead of bundle.Config to JSON to support
// non-string fields assigned with bundle variables
@ -225,10 +266,10 @@ func writeInputFile(inputPath string, input dyn.Value) error {
return os.WriteFile(inputPath, rootConfigJson, 0600)
}
func loadOutputFile(rootPath string, outputPath string) (dyn.Value, error) {
func loadOutputFile(rootPath string, outputPath string) (dyn.Value, diag.Diagnostics) {
outputFile, err := os.Open(outputPath)
if err != nil {
return dyn.InvalidValue, fmt.Errorf("failed to open output file: %w", err)
return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to open output file: %w", err))
}
defer outputFile.Close()
@ -243,27 +284,34 @@ func loadOutputFile(rootPath string, outputPath string) (dyn.Value, error) {
// for that, we pass virtualPath instead of outputPath as file location
virtualPath, err := filepath.Abs(filepath.Join(rootPath, "__generated_by_pydabs__.yml"))
if err != nil {
return dyn.InvalidValue, fmt.Errorf("failed to get absolute path: %w", err)
return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to get absolute path: %w", err))
}
generated, err := yamlloader.LoadYAML(virtualPath, outputFile)
if err != nil {
return dyn.InvalidValue, fmt.Errorf("failed to parse output file: %w", err)
return dyn.InvalidValue, diag.FromErr(fmt.Errorf("failed to parse output file: %w", err))
}
normalized, diagnostic := convert.Normalize(config.Root{}, generated)
if diagnostic.Error() != nil {
return dyn.InvalidValue, fmt.Errorf("failed to normalize output: %w", diagnostic.Error())
return strictNormalize(config.Root{}, generated)
}
func strictNormalize(dst any, generated dyn.Value) (dyn.Value, diag.Diagnostics) {
normalized, diags := convert.Normalize(dst, generated)
// warnings shouldn't happen because the output should already be normalized;
// when they do, it's a bug in the mutator and should be treated as an error
for _, d := range diagnostic.Filter(diag.Warning) {
return dyn.InvalidValue, fmt.Errorf("failed to normalize output: %s", d.Summary)
strictDiags := diag.Diagnostics{}
for _, d := range diags {
if d.Severity == diag.Warning {
d.Severity = diag.Error
}
return normalized, nil
strictDiags = strictDiags.Append(d)
}
return normalized, strictDiags
}
// loadDiagnosticsFile loads diagnostics from a file.

View File

@ -10,6 +10,8 @@ import (
"runtime"
"testing"
"github.com/databricks/cli/libs/dyn/convert"
"github.com/databricks/cli/libs/dyn/merge"
"github.com/databricks/cli/bundle/env"
@ -255,7 +257,7 @@ func TestPythonMutator_badOutput(t *testing.T) {
mutator := PythonMutator(PythonMutatorPhaseLoad)
diag := bundle.Apply(ctx, b, mutator)
assert.EqualError(t, diag.Error(), "failed to load Python mutator output: failed to normalize output: unknown field: unknown_property")
assert.EqualError(t, diag.Error(), "unknown field: unknown_property")
}
func TestPythonMutator_disabled(t *testing.T) {
@ -546,6 +548,46 @@ func TestInterpreterPath(t *testing.T) {
}
}
func TestStrictNormalize(t *testing.T) {
// NB: there is no way to trigger diag.Error, so we don't test it
type TestStruct struct {
A int `json:"a"`
}
value := dyn.NewValue(map[string]dyn.Value{"A": dyn.NewValue("abc", nil)}, nil)
_, diags := convert.Normalize(TestStruct{}, value)
_, strictDiags := strictNormalize(TestStruct{}, value)
assert.False(t, diags.HasError())
assert.True(t, strictDiags.HasError())
}
func TestExplainProcessErr(t *testing.T) {
stderr := "/home/test/.venv/bin/python3: Error while finding module specification for 'databricks.bundles.build' (ModuleNotFoundError: No module named 'databricks')\n"
expected := `/home/test/.venv/bin/python3: Error while finding module specification for 'databricks.bundles.build' (ModuleNotFoundError: No module named 'databricks')
Explanation: 'databricks-pydabs' library is not installed in the Python environment.
If using Python wheels, ensure that 'databricks-pydabs' is included in the dependencies,
and that the wheel is installed in the Python environment:
$ .venv/bin/pip install -e .
If using a virtual environment, ensure it is specified as the venv_path property in databricks.yml,
or activate the environment before running CLI commands:
experimental:
pydabs:
venv_path: .venv
`
out := explainProcessErr(stderr)
assert.Equal(t, expected, out)
}
func withProcessStub(t *testing.T, args []string, output string, diagnostics string) context.Context {
ctx := context.Background()
ctx, stub := process.WithStub(ctx)

View File

@ -2,7 +2,6 @@ package mutator
import (
"context"
"fmt"
"testing"
"github.com/databricks/cli/bundle"
@ -44,11 +43,13 @@ func TestResolveClusterReference(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
b.SetWorkpaceClient(m.WorkspaceClient)
clusterApi := m.GetMockClustersAPI()
clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef1).Return(&compute.ClusterDetails{
ClusterId: "1234-5678-abcd",
}, nil)
clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef2).Return(&compute.ClusterDetails{
ClusterId: "9876-5432-xywz",
clusterApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{
FilterBy: &compute.ListClustersFilterBy{
ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
},
}).Return([]compute.ClusterDetails{
{ClusterId: "1234-5678-abcd", ClusterName: clusterRef1},
{ClusterId: "9876-5432-xywz", ClusterName: clusterRef2},
}, nil)
diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())
@ -78,10 +79,16 @@ func TestResolveNonExistentClusterReference(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
b.SetWorkpaceClient(m.WorkspaceClient)
clusterApi := m.GetMockClustersAPI()
clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef).Return(nil, fmt.Errorf("ClusterDetails named '%s' does not exist", clusterRef))
clusterApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{
FilterBy: &compute.ListClustersFilterBy{
ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
},
}).Return([]compute.ClusterDetails{
{ClusterId: "1234-5678-abcd", ClusterName: "some other cluster"},
}, nil)
diags := bundle.Apply(context.Background(), b, ResolveResourceReferences())
require.ErrorContains(t, diags.Error(), "failed to resolve cluster: Random, err: ClusterDetails named 'Random' does not exist")
require.ErrorContains(t, diags.Error(), "failed to resolve cluster: Random, err: cluster named 'Random' does not exist")
}
func TestNoLookupIfVariableIsSet(t *testing.T) {
@ -158,8 +165,14 @@ func TestResolveVariableReferencesInVariableLookups(t *testing.T) {
m := mocks.NewMockWorkspaceClient(t)
b.SetWorkpaceClient(m.WorkspaceClient)
clusterApi := m.GetMockClustersAPI()
clusterApi.EXPECT().GetByClusterName(mock.Anything, "cluster-bar-dev").Return(&compute.ClusterDetails{
ClusterId: "1234-5678-abcd",
clusterApi.EXPECT().ListAll(mock.Anything, compute.ListClustersRequest{
FilterBy: &compute.ListClustersFilterBy{
ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
},
}).Return([]compute.ClusterDetails{
{ClusterId: "1234-5678-abcd", ClusterName: "cluster-bar-dev"},
{ClusterId: "9876-5432-xywz", ClusterName: "some other cluster"},
}, nil)
diags := bundle.Apply(context.Background(), b, bundle.Seq(ResolveVariableReferencesInLookup(), ResolveResourceReferences()))

View File

@ -10,7 +10,6 @@ import (
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/cli/libs/dyn/convert"
"github.com/databricks/cli/libs/dyn/dynvar"
"github.com/databricks/cli/libs/log"
)
type resolveVariableReferences struct {
@ -124,6 +123,7 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle)
// We rewrite it here to make the resolution logic simpler.
varPath := dyn.NewPath(dyn.Key("var"))
var diags diag.Diagnostics
err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) {
// Synthesize a copy of the root that has all fields that are present in the type
// but not set in the dynamic value set to their corresponding empty value.
@ -180,14 +180,13 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle)
// Normalize the result because variable resolution may have been applied to non-string fields.
// For example, a variable reference may have been resolved to an integer.
root, diags := convert.Normalize(b.Config, root)
for _, diag := range diags {
// This occurs when a variable's resolved value is incompatible with the field's type.
// Log a warning until we have a better way to surface these diagnostics to the user.
log.Warnf(ctx, "normalization diagnostic: %s", diag.Summary)
}
root, normaliseDiags := convert.Normalize(b.Config, root)
diags = diags.Extend(normaliseDiags)
return root, nil
})
return diag.FromErr(err)
if err != nil {
diags = diags.Extend(diag.FromErr(err))
}
return diags
}

View File

@ -45,15 +45,15 @@ func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc {
func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
return dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (nv dyn.Value, err error) {
v, err = dyn.Map(v, "paths", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
v, err = dyn.Map(v, "paths", dyn.Foreach(m.makeRelativeTo(b.BundleRootPath)))
if err != nil {
return dyn.InvalidValue, err
}
v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.BundleRootPath)))
if err != nil {
return dyn.InvalidValue, err
}
v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.BundleRootPath)))
if err != nil {
return dyn.InvalidValue, err
}

View File

@ -9,12 +9,13 @@ import (
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/databricks/cli/bundle/internal/bundletest"
"github.com/databricks/cli/libs/dyn"
"github.com/stretchr/testify/assert"
)
func TestRewriteSyncPathsRelative(t *testing.T) {
b := &bundle.Bundle{
RootPath: ".",
BundleRootPath: ".",
Config: config.Root{
Sync: config.Sync{
Paths: []string{
@ -33,12 +34,12 @@ func TestRewriteSyncPathsRelative(t *testing.T) {
},
}
bundletest.SetLocation(b, "sync.paths[0]", "./databricks.yml")
bundletest.SetLocation(b, "sync.paths[1]", "./databricks.yml")
bundletest.SetLocation(b, "sync.include[0]", "./file.yml")
bundletest.SetLocation(b, "sync.include[1]", "./a/file.yml")
bundletest.SetLocation(b, "sync.exclude[0]", "./a/b/file.yml")
bundletest.SetLocation(b, "sync.exclude[1]", "./a/b/c/file.yml")
bundletest.SetLocation(b, "sync.paths[0]", []dyn.Location{{File: "./databricks.yml"}})
bundletest.SetLocation(b, "sync.paths[1]", []dyn.Location{{File: "./databricks.yml"}})
bundletest.SetLocation(b, "sync.include[0]", []dyn.Location{{File: "./file.yml"}})
bundletest.SetLocation(b, "sync.include[1]", []dyn.Location{{File: "./a/file.yml"}})
bundletest.SetLocation(b, "sync.exclude[0]", []dyn.Location{{File: "./a/b/file.yml"}})
bundletest.SetLocation(b, "sync.exclude[1]", []dyn.Location{{File: "./a/b/c/file.yml"}})
diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
assert.NoError(t, diags.Error())
@ -53,7 +54,7 @@ func TestRewriteSyncPathsRelative(t *testing.T) {
func TestRewriteSyncPathsAbsolute(t *testing.T) {
b := &bundle.Bundle{
RootPath: "/tmp/dir",
BundleRootPath: "/tmp/dir",
Config: config.Root{
Sync: config.Sync{
Paths: []string{
@ -72,12 +73,12 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
},
}
bundletest.SetLocation(b, "sync.paths[0]", "/tmp/dir/databricks.yml")
bundletest.SetLocation(b, "sync.paths[1]", "/tmp/dir/databricks.yml")
bundletest.SetLocation(b, "sync.include[0]", "/tmp/dir/file.yml")
bundletest.SetLocation(b, "sync.include[1]", "/tmp/dir/a/file.yml")
bundletest.SetLocation(b, "sync.exclude[0]", "/tmp/dir/a/b/file.yml")
bundletest.SetLocation(b, "sync.exclude[1]", "/tmp/dir/a/b/c/file.yml")
bundletest.SetLocation(b, "sync.paths[0]", []dyn.Location{{File: "/tmp/dir/databricks.yml"}})
bundletest.SetLocation(b, "sync.paths[1]", []dyn.Location{{File: "/tmp/dir/databricks.yml"}})
bundletest.SetLocation(b, "sync.include[0]", []dyn.Location{{File: "/tmp/dir/file.yml"}})
bundletest.SetLocation(b, "sync.include[1]", []dyn.Location{{File: "/tmp/dir/a/file.yml"}})
bundletest.SetLocation(b, "sync.exclude[0]", []dyn.Location{{File: "/tmp/dir/a/b/file.yml"}})
bundletest.SetLocation(b, "sync.exclude[1]", []dyn.Location{{File: "/tmp/dir/a/b/c/file.yml"}})
diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
assert.NoError(t, diags.Error())
@ -93,7 +94,7 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
func TestRewriteSyncPathsErrorPaths(t *testing.T) {
t.Run("no sync block", func(t *testing.T) {
b := &bundle.Bundle{
RootPath: ".",
BundleRootPath: ".",
}
diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
@ -102,7 +103,7 @@ func TestRewriteSyncPathsErrorPaths(t *testing.T) {
t.Run("empty include/exclude blocks", func(t *testing.T) {
b := &bundle.Bundle{
RootPath: ".",
BundleRootPath: ".",
Config: config.Root{
Sync: config.Sync{
Include: []string{},

View File

@ -0,0 +1,72 @@
package mutator
import (
"context"
"fmt"
"strings"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
)
type rewriteWorkspacePrefix struct{}
// RewriteWorkspacePrefix finds any strings in the bundle configuration that use the
// workspace prefix together with a workspace path variable and removes the prefix from them.
func RewriteWorkspacePrefix() bundle.Mutator {
return &rewriteWorkspacePrefix{}
}
func (m *rewriteWorkspacePrefix) Name() string {
return "RewriteWorkspacePrefix"
}
func (m *rewriteWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
diags := diag.Diagnostics{}
paths := map[string]string{
"/Workspace/${workspace.root_path}": "${workspace.root_path}",
"/Workspace${workspace.root_path}": "${workspace.root_path}",
"/Workspace/${workspace.file_path}": "${workspace.file_path}",
"/Workspace${workspace.file_path}": "${workspace.file_path}",
"/Workspace/${workspace.artifact_path}": "${workspace.artifact_path}",
"/Workspace${workspace.artifact_path}": "${workspace.artifact_path}",
"/Workspace/${workspace.state_path}": "${workspace.state_path}",
"/Workspace${workspace.state_path}": "${workspace.state_path}",
}
err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) {
// Walk through the bundle configuration, check all the string leaves and
// see if any of the prefixes are used in the remote path.
return dyn.Walk(root, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
vv, ok := v.AsString()
if !ok {
return v, nil
}
for path, replacePath := range paths {
if strings.Contains(vv, path) {
newPath := strings.Replace(vv, path, replacePath, 1)
diags = append(diags, diag.Diagnostic{
Severity: diag.Warning,
Summary: fmt.Sprintf("substring %q found in %q. Please update this to %q.", path, vv, newPath),
Detail: "For more information, please refer to: https://docs.databricks.com/en/release-notes/dev-tools/bundles.html#workspace-paths",
Locations: v.Locations(),
Paths: []dyn.Path{p},
})
// Remove the workspace prefix from the string.
return dyn.NewValue(newPath, v.Locations()), nil
}
}
return v, nil
})
})
if err != nil {
return diag.FromErr(err)
}
return diags
}

View File

@ -0,0 +1,85 @@
package mutator
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/require"
)
func TestNoWorkspacePrefixUsed(t *testing.T) {
b := &bundle.Bundle{
Config: config.Root{
Workspace: config.Workspace{
RootPath: "/Workspace/Users/test",
ArtifactPath: "/Workspace/Users/test/artifacts",
FilePath: "/Workspace/Users/test/files",
StatePath: "/Workspace/Users/test/state",
},
Resources: config.Resources{
Jobs: map[string]*resources.Job{
"test_job": {
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{
{
SparkPythonTask: &jobs.SparkPythonTask{
PythonFile: "/Workspace/${workspace.root_path}/file1.py",
},
},
{
NotebookTask: &jobs.NotebookTask{
NotebookPath: "/Workspace${workspace.file_path}/notebook1",
},
Libraries: []compute.Library{
{
Jar: "/Workspace/${workspace.artifact_path}/jar1.jar",
},
},
},
{
NotebookTask: &jobs.NotebookTask{
NotebookPath: "${workspace.file_path}/notebook2",
},
Libraries: []compute.Library{
{
Jar: "${workspace.artifact_path}/jar2.jar",
},
},
},
},
},
},
},
},
},
}
diags := bundle.Apply(context.Background(), b, RewriteWorkspacePrefix())
require.Len(t, diags, 3)
expectedErrors := map[string]bool{
`substring "/Workspace/${workspace.root_path}" found in "/Workspace/${workspace.root_path}/file1.py". Please update this to "${workspace.root_path}/file1.py".`: true,
`substring "/Workspace${workspace.file_path}" found in "/Workspace${workspace.file_path}/notebook1". Please update this to "${workspace.file_path}/notebook1".`: true,
`substring "/Workspace/${workspace.artifact_path}" found in "/Workspace/${workspace.artifact_path}/jar1.jar". Please update this to "${workspace.artifact_path}/jar1.jar".`: true,
}
for _, d := range diags {
require.Equal(t, d.Severity, diag.Warning)
require.Contains(t, expectedErrors, d.Summary)
delete(expectedErrors, d.Summary)
}
require.Equal(t, "${workspace.root_path}/file1.py", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[0].SparkPythonTask.PythonFile)
require.Equal(t, "${workspace.file_path}/notebook1", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[1].NotebookTask.NotebookPath)
require.Equal(t, "${workspace.artifact_path}/jar1.jar", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[1].Libraries[0].Jar)
require.Equal(t, "${workspace.file_path}/notebook2", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[2].NotebookTask.NotebookPath)
require.Equal(t, "${workspace.artifact_path}/jar2.jar", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[2].Libraries[0].Jar)
}

View File

@ -30,50 +30,44 @@ func (m *setRunAs) Name() string {
return "SetRunAs"
}
type errUnsupportedResourceTypeForRunAs struct {
resourceType string
resourceLocation dyn.Location
currentUser string
runAsUser string
func reportRunAsNotSupported(resourceType string, location dyn.Location, currentUser string, runAsUser string) diag.Diagnostics {
return diag.Diagnostics{{
Summary: fmt.Sprintf("%s do not support a setting a run_as user that is different from the owner.\n"+
"Current identity: %s. Run as identity: %s.\n"+
"See https://docs.databricks.com/dev-tools/bundles/run-as.html to learn more about the run_as property.", resourceType, currentUser, runAsUser),
Locations: []dyn.Location{location},
Severity: diag.Error,
}}
}
func (e errUnsupportedResourceTypeForRunAs) Error() string {
return fmt.Sprintf("%s are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Please refer to the documentation at https://docs.databricks.com/dev-tools/bundles/run-as.html for more details. Location of the unsupported resource: %s. Current identity: %s. Run as identity: %s", e.resourceType, e.resourceLocation, e.currentUser, e.runAsUser)
}
func validateRunAs(b *bundle.Bundle) diag.Diagnostics {
diags := diag.Diagnostics{}
type errBothSpAndUserSpecified struct {
spName string
spLoc dyn.Location
userName string
userLoc dyn.Location
}
neitherSpecifiedErr := diag.Diagnostics{{
Summary: "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified",
Locations: []dyn.Location{b.Config.GetLocation("run_as")},
Severity: diag.Error,
}}
func (e errBothSpAndUserSpecified) Error() string {
return fmt.Sprintf("run_as section must specify exactly one identity. A service_principal_name %q is specified at %s. A user_name %q is defined at %s", e.spName, e.spLoc, e.userName, e.userLoc)
}
func validateRunAs(b *bundle.Bundle) error {
neitherSpecifiedErr := fmt.Errorf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s", b.Config.GetLocation("run_as"))
// Error if neither service_principal_name nor user_name are specified, but the
// Fail fast if neither service_principal_name nor user_name are specified, but the
// run_as section is present.
if b.Config.Value().Get("run_as").Kind() == dyn.KindNil {
return neitherSpecifiedErr
}
// Error if one or both of service_principal_name and user_name are specified,
// Fail fast if one or both of service_principal_name and user_name are specified,
// but with empty values.
if b.Config.RunAs.ServicePrincipalName == "" && b.Config.RunAs.UserName == "" {
runAs := b.Config.RunAs
if runAs.ServicePrincipalName == "" && runAs.UserName == "" {
return neitherSpecifiedErr
}
// Error if both service_principal_name and user_name are specified
runAs := b.Config.RunAs
if runAs.UserName != "" && runAs.ServicePrincipalName != "" {
return errBothSpAndUserSpecified{
spName: runAs.ServicePrincipalName,
userName: runAs.UserName,
spLoc: b.Config.GetLocation("run_as.service_principal_name"),
userLoc: b.Config.GetLocation("run_as.user_name"),
}
diags = diags.Extend(diag.Diagnostics{{
Summary: "run_as section cannot specify both user_name and service_principal_name",
Locations: []dyn.Location{b.Config.GetLocation("run_as")},
Severity: diag.Error,
}})
}
identity := runAs.ServicePrincipalName
@ -83,40 +77,40 @@ func validateRunAs(b *bundle.Bundle) error {
// All resources are supported if the run_as identity is the same as the current deployment identity.
if identity == b.Config.Workspace.CurrentUser.UserName {
return nil
return diags
}
// DLT pipelines do not support run_as in the API.
if len(b.Config.Resources.Pipelines) > 0 {
return errUnsupportedResourceTypeForRunAs{
resourceType: "pipelines",
resourceLocation: b.Config.GetLocation("resources.pipelines"),
currentUser: b.Config.Workspace.CurrentUser.UserName,
runAsUser: identity,
}
diags = diags.Extend(reportRunAsNotSupported(
"pipelines",
b.Config.GetLocation("resources.pipelines"),
b.Config.Workspace.CurrentUser.UserName,
identity,
))
}
// Model serving endpoints do not support run_as in the API.
if len(b.Config.Resources.ModelServingEndpoints) > 0 {
return errUnsupportedResourceTypeForRunAs{
resourceType: "model_serving_endpoints",
resourceLocation: b.Config.GetLocation("resources.model_serving_endpoints"),
currentUser: b.Config.Workspace.CurrentUser.UserName,
runAsUser: identity,
}
diags = diags.Extend(reportRunAsNotSupported(
"model_serving_endpoints",
b.Config.GetLocation("resources.model_serving_endpoints"),
b.Config.Workspace.CurrentUser.UserName,
identity,
))
}
// Monitors do not support run_as in the API.
if len(b.Config.Resources.QualityMonitors) > 0 {
return errUnsupportedResourceTypeForRunAs{
resourceType: "quality_monitors",
resourceLocation: b.Config.GetLocation("resources.quality_monitors"),
currentUser: b.Config.Workspace.CurrentUser.UserName,
runAsUser: identity,
}
diags = diags.Extend(reportRunAsNotSupported(
"quality_monitors",
b.Config.GetLocation("resources.quality_monitors"),
b.Config.Workspace.CurrentUser.UserName,
identity,
))
}
return nil
return diags
}
func setRunAsForJobs(b *bundle.Bundle) {
@ -187,8 +181,9 @@ func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
}
// Assert the run_as configuration is valid in the context of the bundle
if err := validateRunAs(b); err != nil {
return diag.FromErr(err)
diags := validateRunAs(b)
if diags.HasError() {
return diags
}
setRunAsForJobs(b)

View File

@ -32,6 +32,7 @@ func allResourceTypes(t *testing.T) []string {
// the dyn library gives us the correct list of all resources supported. Please
// also update this check when adding a new resource
require.Equal(t, []string{
"clusters",
"experiments",
"jobs",
"model_serving_endpoints",
@ -133,6 +134,7 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) {
// some point in the future. These resources are (implicitly) on the deny list, since
// they are not on the allow list below.
allowList := []string{
"clusters",
"jobs",
"models",
"registered_models",
@ -186,11 +188,8 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) {
Config: *r,
}
diags := bundle.Apply(context.Background(), b, SetRunAs())
assert.Equal(t, diags.Error().Error(), errUnsupportedResourceTypeForRunAs{
resourceType: rt,
resourceLocation: dyn.Location{},
currentUser: "alice",
runAsUser: "bob",
}.Error(), "expected run_as with a different identity than the current deployment user to not supported for resources of type: %s", rt)
assert.Contains(t, diags.Error().Error(), "do not support setting a run_as user that is different from the owner.\n"+
"Current identity: alice. Run as identity: bob.\n"+
"See https://docs.databricks.com/dev-tools/bundles/run-as.html to learn more about the run_as property.", rt)
}
}

View File

@ -15,7 +15,7 @@ import (
func TestSyncDefaultPath_DefaultIfUnset(t *testing.T) {
b := &bundle.Bundle{
RootPath: "/tmp/some/dir",
BundleRootPath: "/tmp/some/dir",
Config: config.Root{},
}
@ -51,7 +51,7 @@ func TestSyncDefaultPath_SkipIfSet(t *testing.T) {
for _, tcase := range tcases {
t.Run(tcase.name, func(t *testing.T) {
b := &bundle.Bundle{
RootPath: "/tmp/some/dir",
BundleRootPath: "/tmp/some/dir",
Config: config.Root{},
}

View File

@ -57,7 +57,7 @@ func (m *syncInferRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
var diags diag.Diagnostics
// Use the bundle root path as the starting point for inferring the sync root path.
bundleRootPath := filepath.Clean(b.RootPath)
bundleRootPath := filepath.Clean(b.BundleRootPath)
// Infer the sync root path by looking at each one of the sync paths.
// Every sync path must be a descendant of the final sync root path.

View File

@ -9,13 +9,14 @@ import (
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/mutator"
"github.com/databricks/cli/bundle/internal/bundletest"
"github.com/databricks/cli/libs/dyn"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSyncInferRoot_NominalAbsolute(t *testing.T) {
b := &bundle.Bundle{
RootPath: "/tmp/some/dir",
BundleRootPath: "/tmp/some/dir",
Config: config.Root{
Sync: config.Sync{
Paths: []string{
@ -46,7 +47,7 @@ func TestSyncInferRoot_NominalAbsolute(t *testing.T) {
func TestSyncInferRoot_NominalRelative(t *testing.T) {
b := &bundle.Bundle{
RootPath: "./some/dir",
BundleRootPath: "./some/dir",
Config: config.Root{
Sync: config.Sync{
Paths: []string{
@ -77,7 +78,7 @@ func TestSyncInferRoot_NominalRelative(t *testing.T) {
func TestSyncInferRoot_ParentDirectory(t *testing.T) {
b := &bundle.Bundle{
RootPath: "/tmp/some/dir",
BundleRootPath: "/tmp/some/dir",
Config: config.Root{
Sync: config.Sync{
Paths: []string{
@ -108,7 +109,7 @@ func TestSyncInferRoot_ParentDirectory(t *testing.T) {
func TestSyncInferRoot_ManyParentDirectories(t *testing.T) {
b := &bundle.Bundle{
RootPath: "/tmp/some/dir/that/is/very/deeply/nested",
BundleRootPath: "/tmp/some/dir/that/is/very/deeply/nested",
Config: config.Root{
Sync: config.Sync{
Paths: []string{
@ -145,7 +146,7 @@ func TestSyncInferRoot_ManyParentDirectories(t *testing.T) {
func TestSyncInferRoot_MultiplePaths(t *testing.T) {
b := &bundle.Bundle{
RootPath: "/tmp/some/bundle/root",
BundleRootPath: "/tmp/some/bundle/root",
Config: config.Root{
Sync: config.Sync{
Paths: []string{
@ -172,7 +173,7 @@ func TestSyncInferRoot_MultiplePaths(t *testing.T) {
func TestSyncInferRoot_Error(t *testing.T) {
b := &bundle.Bundle{
RootPath: "/tmp/some/dir",
BundleRootPath: "/tmp/some/dir",
Config: config.Root{
Sync: config.Sync{
Paths: []string{
@ -184,7 +185,7 @@ func TestSyncInferRoot_Error(t *testing.T) {
},
}
bundletest.SetLocation(b, "sync.paths", "databricks.yml")
bundletest.SetLocation(b, "sync.paths", []dyn.Location{{File: "databricks.yml"}})
ctx := context.Background()
diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())

View File

@ -4,97 +4,11 @@ import (
"fmt"
"slices"
"github.com/databricks/cli/bundle/libraries"
"github.com/databricks/cli/bundle/config/mutator/paths"
"github.com/databricks/cli/libs/dyn"
)
type jobRewritePattern struct {
pattern dyn.Pattern
fn rewriteFunc
skipRewrite func(string) bool
}
func noSkipRewrite(string) bool {
return false
}
func rewritePatterns(t *translateContext, base dyn.Pattern) []jobRewritePattern {
return []jobRewritePattern{
{
base.Append(dyn.Key("notebook_task"), dyn.Key("notebook_path")),
t.translateNotebookPath,
noSkipRewrite,
},
{
base.Append(dyn.Key("spark_python_task"), dyn.Key("python_file")),
t.translateFilePath,
noSkipRewrite,
},
{
base.Append(dyn.Key("dbt_task"), dyn.Key("project_directory")),
t.translateDirectoryPath,
noSkipRewrite,
},
{
base.Append(dyn.Key("sql_task"), dyn.Key("file"), dyn.Key("path")),
t.translateFilePath,
noSkipRewrite,
},
{
base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("whl")),
t.translateNoOp,
noSkipRewrite,
},
{
base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("jar")),
t.translateNoOp,
noSkipRewrite,
},
{
base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("requirements")),
t.translateFilePath,
noSkipRewrite,
},
}
}
func (t *translateContext) jobRewritePatterns() []jobRewritePattern {
// Base pattern to match all tasks in all jobs.
base := dyn.NewPattern(
dyn.Key("resources"),
dyn.Key("jobs"),
dyn.AnyKey(),
dyn.Key("tasks"),
dyn.AnyIndex(),
)
// Compile list of patterns and their respective rewrite functions.
jobEnvironmentsPatterns := []jobRewritePattern{
{
dyn.NewPattern(
dyn.Key("resources"),
dyn.Key("jobs"),
dyn.AnyKey(),
dyn.Key("environments"),
dyn.AnyIndex(),
dyn.Key("spec"),
dyn.Key("dependencies"),
dyn.AnyIndex(),
),
t.translateNoOpWithPrefix,
func(s string) bool {
return !libraries.IsLibraryLocal(s)
},
},
}
taskPatterns := rewritePatterns(t, base)
forEachPatterns := rewritePatterns(t, base.Append(dyn.Key("for_each_task"), dyn.Key("task")))
allPatterns := append(taskPatterns, jobEnvironmentsPatterns...)
allPatterns = append(allPatterns, forEachPatterns...)
return allPatterns
}
func (t *translateContext) applyJobTranslations(v dyn.Value) (dyn.Value, error) {
var err error
@ -111,8 +25,7 @@ func (t *translateContext) applyJobTranslations(v dyn.Value) (dyn.Value, error)
}
}
for _, rewritePattern := range t.jobRewritePatterns() {
v, err = dyn.MapByPattern(v, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
return paths.VisitJobPaths(v, func(p dyn.Path, kind paths.PathKind, v dyn.Value) (dyn.Value, error) {
key := p[2].Key()
// Skip path translation if the job is using git source.
@ -125,16 +38,28 @@ func (t *translateContext) applyJobTranslations(v dyn.Value) (dyn.Value, error)
return dyn.InvalidValue, fmt.Errorf("unable to determine directory for job %s: %w", key, err)
}
sv := v.MustString()
if rewritePattern.skipRewrite(sv) {
return v, nil
}
return t.rewriteRelativeTo(p, v, rewritePattern.fn, dir, fallback[key])
})
rewritePatternFn, err := t.getRewritePatternFn(kind)
if err != nil {
return dyn.InvalidValue, err
}
return t.rewriteRelativeTo(p, v, rewritePatternFn, dir, fallback[key])
})
}
return v, nil
func (t *translateContext) getRewritePatternFn(kind paths.PathKind) (rewriteFunc, error) {
switch kind {
case paths.PathKindLibrary:
return t.translateNoOp, nil
case paths.PathKindNotebook:
return t.translateNotebookPath, nil
case paths.PathKindWorkspaceFile:
return t.translateFilePath, nil
case paths.PathKindDirectory:
return t.translateDirectoryPath, nil
case paths.PathKindWithPrefix:
return t.translateNoOpWithPrefix, nil
}
return nil, fmt.Errorf("unsupported path kind: %d", kind)
}
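
For readers new to this pattern, the switch above is a plain kind-to-function dispatch. A minimal, self-contained sketch of the same shape (the PathKind values and rewrite behaviors here are illustrative stand-ins, not the actual bundle/config/mutator/paths API):

package main

import "fmt"

// PathKind classifies a configuration path; values are illustrative.
type PathKind int

const (
	PathKindLibrary PathKind = iota
	PathKindNotebook
	PathKindWorkspaceFile
)

// rewriteFunc maps a raw path to its rewritten form.
type rewriteFunc func(string) string

// getRewriteFn dispatches on the path kind, mirroring getRewritePatternFn above.
func getRewriteFn(kind PathKind) (rewriteFunc, error) {
	switch kind {
	case PathKindLibrary:
		return func(s string) string { return s }, nil // no-op for libraries
	case PathKindNotebook:
		return func(s string) string { return "/Workspace" + s }, nil
	case PathKindWorkspaceFile:
		return func(s string) string { return "/Workspace/files" + s }, nil
	}
	return nil, fmt.Errorf("unsupported path kind: %d", kind)
}

func main() {
	fn, err := getRewriteFn(PathKindNotebook)
	if err != nil {
		panic(err)
	}
	fmt.Println(fn("/src/notebook.py")) // /Workspace/src/notebook.py
}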

View File

@ -82,7 +82,7 @@ func TestTranslatePathsSkippedWithGitSource(t *testing.T) {
},
}
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
require.NoError(t, diags.Error())
@ -210,7 +210,7 @@ func TestTranslatePaths(t *testing.T) {
},
}
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
require.NoError(t, diags.Error())
@ -346,8 +346,8 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
},
}
bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml"))
bundletest.SetLocation(b, "resources.pipelines", filepath.Join(dir, "pipeline/resource.yml"))
bundletest.SetLocation(b, "resources.jobs", []dyn.Location{{File: filepath.Join(dir, "job/resource.yml")}})
bundletest.SetLocation(b, "resources.pipelines", []dyn.Location{{File: filepath.Join(dir, "pipeline/resource.yml")}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
require.NoError(t, diags.Error())
@ -408,7 +408,7 @@ func TestTranslatePathsOutsideSyncRoot(t *testing.T) {
},
}
bundletest.SetLocation(b, ".", filepath.Join(dir, "../resource.yml"))
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "../resource.yml")}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
assert.ErrorContains(t, diags.Error(), "is not contained in sync root path")
@ -439,7 +439,7 @@ func TestJobNotebookDoesNotExistError(t *testing.T) {
},
}
bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found")
@ -470,7 +470,7 @@ func TestJobFileDoesNotExistError(t *testing.T) {
},
}
bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
assert.EqualError(t, diags.Error(), "file ./doesnt_exist.py not found")
@ -501,7 +501,7 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) {
},
}
bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found")
@ -532,7 +532,7 @@ func TestPipelineFileDoesNotExistError(t *testing.T) {
},
}
bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
assert.EqualError(t, diags.Error(), "file ./doesnt_exist.py not found")
@ -567,7 +567,7 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) {
},
}
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
assert.ErrorContains(t, diags.Error(), `expected a file for "resources.jobs.job.tasks[0].spark_python_task.python_file" but got a notebook`)
@ -602,7 +602,7 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) {
},
}
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
assert.ErrorContains(t, diags.Error(), `expected a notebook for "resources.jobs.job.tasks[0].notebook_task.notebook_path" but got a file`)
@ -637,7 +637,7 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) {
},
}
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
assert.ErrorContains(t, diags.Error(), `expected a notebook for "resources.pipelines.pipeline.libraries[0].notebook.path" but got a file`)
@ -672,7 +672,7 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) {
},
}
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
assert.ErrorContains(t, diags.Error(), `expected a file for "resources.pipelines.pipeline.libraries[0].file.path" but got a notebook`)
@ -710,7 +710,7 @@ func TestTranslatePathJobEnvironments(t *testing.T) {
},
}
bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml"))
bundletest.SetLocation(b, "resources.jobs", []dyn.Location{{File: filepath.Join(dir, "job/resource.yml")}})
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
require.NoError(t, diags.Error())
@ -753,8 +753,8 @@ func TestTranslatePathWithComplexVariables(t *testing.T) {
},
}
bundletest.SetLocation(b, "variables", filepath.Join(dir, "variables/variables.yml"))
bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml"))
bundletest.SetLocation(b, "variables", []dyn.Location{{File: filepath.Join(dir, "variables/variables.yml")}})
bundletest.SetLocation(b, "resources.jobs", []dyn.Location{{File: filepath.Join(dir, "job/resource.yml")}})
ctx := context.Background()
// Assign the variables to the dynamic configuration.

View File

@ -40,6 +40,10 @@ func (v *verifyCliVersion) Apply(ctx context.Context, b *bundle.Bundle) diag.Dia
}
if !c.Check(version) {
if version.Prerelease() == "dev" && version.Major() == 0 {
return diag.Warningf("Ignoring Databricks CLI version constraint for development build. Required: %s, current: %s", constraint, currentVersion)
}
return diag.Errorf("Databricks CLI version constraint not satisfied. Required: %s, current: %s", constraint, currentVersion)
}
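
The Version type used here exposes Prerelease() and Major(), which matches the API shape of github.com/Masterminds/semver/v3; treating that as the library in use is an assumption on my part. A sketch of why a dev build such as 0.0.0-dev+06b169284737 hits the warning branch instead of the error:

package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3" // assumed library; its method set matches the code above
)

func main() {
	c, err := semver.NewConstraint(">= 0.100.0")
	if err != nil {
		panic(err)
	}
	v, err := semver.NewVersion("0.0.0-dev+06b169284737")
	if err != nil {
		panic(err)
	}
	// The constraint fails for the dev build...
	fmt.Println(c.Check(v)) // false
	// ...but a "dev" prerelease on a 0.x version is downgraded to a warning.
	fmt.Println(v.Prerelease() == "dev" && v.Major() == 0) // true
}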

View File

@ -107,6 +107,11 @@ func TestVerifyCliVersion(t *testing.T) {
constraint: "^0.100",
expectedError: "invalid version constraint \"^0.100\" specified. Please specify the version constraint in the format (>=) 0.0.0(, <= 1.0.0)",
},
{
currentVersion: "0.0.0-dev+06b169284737",
constraint: ">= 0.100.0",
expectedError: "Ignoring Databricks CLI version constraint for development build. Required: >= 0.100.0",
},
}
t.Cleanup(func() {
@ -130,7 +135,7 @@ func TestVerifyCliVersion(t *testing.T) {
diags := bundle.Apply(context.Background(), b, VerifyCliVersion())
if tc.expectedError != "" {
require.NotEmpty(t, diags)
require.Equal(t, tc.expectedError, diags.Error().Error())
require.Contains(t, diags[0].Summary, tc.expectedError)
} else {
require.Empty(t, diags)
}

View File

@ -19,6 +19,7 @@ type Resources struct {
RegisteredModels map[string]*resources.RegisteredModel `json:"registered_models,omitempty"`
QualityMonitors map[string]*resources.QualityMonitor `json:"quality_monitors,omitempty"`
Schemas map[string]*resources.Schema `json:"schemas,omitempty"`
Clusters map[string]*resources.Cluster `json:"clusters,omitempty"`
}
type ConfigResource interface {
@ -58,3 +59,22 @@ func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error)
return found[0], nil
}
type ResourceDescription struct {
SingularName string
}
// The keys of the map correspond to the resource keys in the bundle configuration.
func SupportedResources() map[string]ResourceDescription {
return map[string]ResourceDescription{
"jobs": {SingularName: "job"},
"pipelines": {SingularName: "pipeline"},
"models": {SingularName: "model"},
"experiments": {SingularName: "experiment"},
"model_serving_endpoints": {SingularName: "model_serving_endpoint"},
"registered_models": {SingularName: "registered_model"},
"quality_monitors": {SingularName: "quality_monitor"},
"schemas": {SingularName: "schema"},
"clusters": {SingularName: "cluster"},
}
}
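
A typical consumer of this map needs stable output, so it sorts the keys before rendering them (map iteration order in Go is randomized). A self-contained sketch with an abbreviated copy of the map:

package main

import (
	"fmt"
	"sort"
	"strings"
)

type ResourceDescription struct {
	SingularName string
}

func main() {
	supported := map[string]ResourceDescription{
		"jobs":      {SingularName: "job"},
		"pipelines": {SingularName: "pipeline"},
		"schemas":   {SingularName: "schema"},
	}
	keys := make([]string, 0, len(supported))
	for k := range supported {
		keys = append(keys, k)
	}
	sort.Strings(keys) // stable ordering for user-facing output
	fmt.Println("supported resources: " + strings.Join(keys, ", "))
}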

View File

@ -0,0 +1,39 @@
package resources
import (
"context"
"github.com/databricks/cli/libs/log"
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/marshal"
"github.com/databricks/databricks-sdk-go/service/compute"
)
type Cluster struct {
ID string `json:"id,omitempty" bundle:"readonly"`
Permissions []Permission `json:"permissions,omitempty"`
ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
*compute.ClusterSpec
}
func (s *Cluster) UnmarshalJSON(b []byte) error {
return marshal.Unmarshal(b, s)
}
func (s Cluster) MarshalJSON() ([]byte, error) {
return marshal.Marshal(s)
}
func (s *Cluster) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
_, err := w.Clusters.GetByClusterId(ctx, id)
if err != nil {
log.Debugf(ctx, "cluster %s does not exist", id)
return false, err
}
return true, nil
}
func (s *Cluster) TerraformResourceName() string {
return "databricks_cluster"
}

View File

@ -3,6 +3,7 @@ package config
import (
"encoding/json"
"reflect"
"strings"
"testing"
"github.com/stretchr/testify/assert"
@ -61,3 +62,18 @@ func TestCustomMarshallerIsImplemented(t *testing.T) {
}, "Resource %s does not have a custom unmarshaller", field.Name)
}
}
func TestSupportedResources(t *testing.T) {
expected := map[string]ResourceDescription{}
typ := reflect.TypeOf(Resources{})
for i := 0; i < typ.NumField(); i++ {
field := typ.Field(i)
jsonTags := strings.Split(field.Tag.Get("json"), ",")
singularName := strings.TrimSuffix(jsonTags[0], "s")
expected[jsonTags[0]] = ResourceDescription{SingularName: singularName}
}
// Please add your resource to the SupportedResources() function in resources.go
// if you are adding a new resource.
assert.Equal(t, expected, SupportedResources())
}

View File

@ -366,9 +366,9 @@ func (r *Root) MergeTargetOverrides(name string) error {
}
}
// Merge `compute_id`. This field must be overwritten if set, not merged.
if v := target.Get("compute_id"); v.Kind() != dyn.KindInvalid {
root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("compute_id")), v)
// Merge `cluster_id`. This field must be overwritten if set, not merged.
if v := target.Get("cluster_id"); v.Kind() != dyn.KindInvalid {
root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("cluster_id")), v)
if err != nil {
return err
}
@ -406,6 +406,52 @@ func (r *Root) MergeTargetOverrides(name string) error {
return r.updateWithDynamicValue(root)
}
var allowedVariableDefinitions = []([]string){
{"default", "type", "description"},
{"default", "type"},
{"default", "description"},
{"lookup", "description"},
{"default"},
{"lookup"},
}
// isFullVariableOverrideDef checks if the given value is a full syntax variable override.
// A full syntax variable override is a map whose keys exactly match one of the
// combinations listed in allowedVariableDefinitions above, e.g. {"default", "type",
// "description"}, {"default"}, or {"lookup"}.
func isFullVariableOverrideDef(v dyn.Value) bool {
mv, ok := v.AsMap()
if !ok {
return false
}
// If the map has more than 3 keys, it is not a full variable override.
if mv.Len() > 3 {
return false
}
for _, keys := range allowedVariableDefinitions {
if len(keys) != mv.Len() {
continue
}
// Check if the keys are the same.
match := true
for _, key := range keys {
if _, ok := mv.GetByString(key); !ok {
match = false
break
}
}
if match {
return true
}
}
return false
}
// rewriteShorthands performs lightweight rewriting of the configuration
// tree where we allow users to write a shorthand and must rewrite to the full form.
func rewriteShorthands(v dyn.Value) (dyn.Value, error) {
@ -433,20 +479,27 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) {
}, variable.Locations()), nil
case dyn.KindMap, dyn.KindSequence:
// Check if the original definition of variable has a type field.
typeV, err := dyn.GetByPath(v, p.Append(dyn.Key("type")))
if err != nil {
// If it's a full variable definition, leave it as is.
if isFullVariableOverrideDef(variable) {
return variable, nil
}
if typeV.MustString() == "complex" {
// Check if the original definition of the variable has a type field.
// If it does, the shorthand is a value of a complex type.
// The type might not be found if the variable is overridden in a separate file
// and the configuration has not been merged yet.
typeV, err := dyn.GetByPath(v, p.Append(dyn.Key("type")))
if err == nil && typeV.MustString() == "complex" {
return dyn.NewValue(map[string]dyn.Value{
"type": typeV,
"default": variable,
}, variable.Locations()), nil
}
return variable, nil
// If it's a shorthand, rewrite it to a full variable definition.
return dyn.NewValue(map[string]dyn.Value{
"default": variable,
}, variable.Locations()), nil
default:
return variable, nil

View File

@ -6,6 +6,7 @@ import (
"testing"
"github.com/databricks/cli/bundle/config/variable"
"github.com/databricks/cli/libs/dyn"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -139,7 +140,7 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) {
},
Targets: map[string]*Target{
"development": {
Variables: map[string]*variable.Variable{
Variables: map[string]*variable.TargetVariable{
"foo": {
Default: "bar",
Description: "wrong",
@ -169,3 +170,87 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) {
assert.Equal(t, "complex var", root.Variables["complex"].Description)
}
func TestIsFullVariableOverrideDef(t *testing.T) {
testCases := []struct {
value dyn.Value
expected bool
}{
{
value: dyn.V(map[string]dyn.Value{
"type": dyn.V("string"),
"default": dyn.V("foo"),
"description": dyn.V("foo var"),
}),
expected: true,
},
{
value: dyn.V(map[string]dyn.Value{
"type": dyn.V("string"),
"lookup": dyn.V("foo"),
"description": dyn.V("foo var"),
}),
expected: false,
},
{
value: dyn.V(map[string]dyn.Value{
"type": dyn.V("string"),
"default": dyn.V("foo"),
}),
expected: true,
},
{
value: dyn.V(map[string]dyn.Value{
"type": dyn.V("string"),
"lookup": dyn.V("foo"),
}),
expected: false,
},
{
value: dyn.V(map[string]dyn.Value{
"description": dyn.V("string"),
"default": dyn.V("foo"),
}),
expected: true,
},
{
value: dyn.V(map[string]dyn.Value{
"description": dyn.V("string"),
"lookup": dyn.V("foo"),
}),
expected: true,
},
{
value: dyn.V(map[string]dyn.Value{
"default": dyn.V("foo"),
}),
expected: true,
},
{
value: dyn.V(map[string]dyn.Value{
"lookup": dyn.V("foo"),
}),
expected: true,
},
{
value: dyn.V(map[string]dyn.Value{
"type": dyn.V("string"),
}),
expected: false,
},
{
value: dyn.V(map[string]dyn.Value{
"type": dyn.V("string"),
"default": dyn.V("foo"),
"description": dyn.V("foo var"),
"lookup": dyn.V("foo"),
}),
expected: false,
},
}
for i, tc := range testCases {
assert.Equal(t, tc.expected, isFullVariableOverrideDef(tc.value), "test case %d", i)
}
}

View File

@ -24,8 +24,11 @@ type Target struct {
// name prefix of deployed resources.
Presets Presets `json:"presets,omitempty"`
// Overrides the compute used for jobs and other supported assets.
ComputeID string `json:"compute_id,omitempty"`
// DEPRECATED: Overrides the compute used for jobs and other supported assets.
ComputeId string `json:"compute_id,omitempty"`
// Overrides the cluster used for jobs and other supported assets.
ClusterId string `json:"cluster_id,omitempty"`
Bundle *Bundle `json:"bundle,omitempty"`
@ -38,7 +41,26 @@ type Target struct {
// Override default values or lookup name for defined variables
// Does not permit defining new variables or redefining existing ones
// in the scope of a target
Variables map[string]*variable.Variable `json:"variables,omitempty"`
//
// There are two valid ways to define a variable override in a target:
// 1. Direct value override. We normalize this to the variable.Variable
// struct format when loading the configuration YAML:
//
// variables:
// foo: "value"
//
// 2. Override matching the variable.Variable struct.
//
// variables:
// foo:
// default: "value"
//
// OR
//
// variables:
// foo:
// lookup: "resource_name"
Variables map[string]*variable.TargetVariable `json:"variables,omitempty"`
Git Git `json:"git,omitempty"`

View File

@ -0,0 +1,161 @@
package validate
import (
"context"
"fmt"
"strings"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/databricks-sdk-go/service/jobs"
)
// JobTaskClusterSpec validates that a job task has a cluster spec defined
// if the task requires a cluster
func JobTaskClusterSpec() bundle.ReadOnlyMutator {
return &jobTaskClusterSpec{}
}
type jobTaskClusterSpec struct {
}
func (v *jobTaskClusterSpec) Name() string {
return "validate:job_task_cluster_spec"
}
func (v *jobTaskClusterSpec) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
diags := diag.Diagnostics{}
jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs"))
for resourceName, job := range rb.Config().Resources.Jobs {
resourcePath := jobsPath.Append(dyn.Key(resourceName))
for taskIndex, task := range job.Tasks {
taskPath := resourcePath.Append(dyn.Key("tasks"), dyn.Index(taskIndex))
diags = diags.Extend(validateJobTask(rb, task, taskPath))
}
}
return diags
}
func validateJobTask(rb bundle.ReadOnlyBundle, task jobs.Task, taskPath dyn.Path) diag.Diagnostics {
diags := diag.Diagnostics{}
var specified []string
var unspecified []string
if task.JobClusterKey != "" {
specified = append(specified, "job_cluster_key")
} else {
unspecified = append(unspecified, "job_cluster_key")
}
if task.EnvironmentKey != "" {
specified = append(specified, "environment_key")
} else {
unspecified = append(unspecified, "environment_key")
}
if task.ExistingClusterId != "" {
specified = append(specified, "existing_cluster_id")
} else {
unspecified = append(unspecified, "existing_cluster_id")
}
if task.NewCluster != nil {
specified = append(specified, "new_cluster")
} else {
unspecified = append(unspecified, "new_cluster")
}
if task.ForEachTask != nil {
forEachTaskPath := taskPath.Append(dyn.Key("for_each_task"), dyn.Key("task"))
diags = diags.Extend(validateJobTask(rb, task.ForEachTask.Task, forEachTaskPath))
}
if isComputeTask(task) && len(specified) == 0 {
if task.NotebookTask != nil {
// notebook tasks without a cluster spec will use the notebook environment
} else {
// the path alone might not be very helpful; adding the user-specified task key clarifies the context
detail := fmt.Sprintf(
"Task %q requires a cluster or an environment to run.\nSpecify one of the following fields: %s.",
task.TaskKey,
strings.Join(unspecified, ", "),
)
diags = diags.Append(diag.Diagnostic{
Severity: diag.Error,
Summary: "Missing required cluster or environment settings",
Detail: detail,
Locations: rb.Config().GetLocations(taskPath.String()),
Paths: []dyn.Path{taskPath},
})
}
}
return diags
}
// isComputeTask returns true if the task runs on a cluster or serverless GC
func isComputeTask(task jobs.Task) bool {
if task.NotebookTask != nil {
// if warehouse_id is set, it's a SQL notebook that doesn't need a cluster or serverless GC
if task.NotebookTask.WarehouseId != "" {
return false
} else {
// the task settings don't require specifying a cluster/serverless GC, but the task itself can run on one;
// we handle that case separately in validateJobTask
return true
}
}
if task.PythonWheelTask != nil {
return true
}
if task.DbtTask != nil {
return true
}
if task.SparkJarTask != nil {
return true
}
if task.SparkSubmitTask != nil {
return true
}
if task.SparkPythonTask != nil {
return true
}
if task.SqlTask != nil {
return false
}
if task.PipelineTask != nil {
// while pipelines use clusters, pipeline tasks don't; they only trigger pipelines
return false
}
if task.RunJobTask != nil {
return false
}
if task.ConditionTask != nil {
return false
}
// a for_each_task doesn't use clusters itself, though its underlying task(s) can
if task.ForEachTask != nil {
return false
}
return false
}

View File

@ -0,0 +1,203 @@
package validate
import (
"context"
"testing"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/assert"
)
func TestJobTaskClusterSpec(t *testing.T) {
expectedSummary := "Missing required cluster or environment settings"
type testCase struct {
name string
task jobs.Task
errorPath string
errorDetail string
errorSummary string
}
testCases := []testCase{
{
name: "valid notebook task",
task: jobs.Task{
// while a cluster is needed, the task will use the notebook environment to create one
NotebookTask: &jobs.NotebookTask{},
},
},
{
name: "valid notebook task (job_cluster_key)",
task: jobs.Task{
JobClusterKey: "cluster1",
NotebookTask: &jobs.NotebookTask{},
},
},
{
name: "valid notebook task (new_cluster)",
task: jobs.Task{
NewCluster: &compute.ClusterSpec{},
NotebookTask: &jobs.NotebookTask{},
},
},
{
name: "valid notebook task (existing_cluster_id)",
task: jobs.Task{
ExistingClusterId: "cluster1",
NotebookTask: &jobs.NotebookTask{},
},
},
{
name: "valid SQL notebook task",
task: jobs.Task{
NotebookTask: &jobs.NotebookTask{
WarehouseId: "warehouse1",
},
},
},
{
name: "valid python wheel task",
task: jobs.Task{
JobClusterKey: "cluster1",
PythonWheelTask: &jobs.PythonWheelTask{},
},
},
{
name: "valid python wheel task (environment_key)",
task: jobs.Task{
EnvironmentKey: "environment1",
PythonWheelTask: &jobs.PythonWheelTask{},
},
},
{
name: "valid dbt task",
task: jobs.Task{
JobClusterKey: "cluster1",
DbtTask: &jobs.DbtTask{},
},
},
{
name: "valid spark jar task",
task: jobs.Task{
JobClusterKey: "cluster1",
SparkJarTask: &jobs.SparkJarTask{},
},
},
{
name: "valid spark submit",
task: jobs.Task{
NewCluster: &compute.ClusterSpec{},
SparkSubmitTask: &jobs.SparkSubmitTask{},
},
},
{
name: "valid spark python task",
task: jobs.Task{
JobClusterKey: "cluster1",
SparkPythonTask: &jobs.SparkPythonTask{},
},
},
{
name: "valid SQL task",
task: jobs.Task{
SqlTask: &jobs.SqlTask{},
},
},
{
name: "valid pipeline task",
task: jobs.Task{
PipelineTask: &jobs.PipelineTask{},
},
},
{
name: "valid run job task",
task: jobs.Task{
RunJobTask: &jobs.RunJobTask{},
},
},
{
name: "valid condition task",
task: jobs.Task{
ConditionTask: &jobs.ConditionTask{},
},
},
{
name: "valid for each task",
task: jobs.Task{
ForEachTask: &jobs.ForEachTask{
Task: jobs.Task{
JobClusterKey: "cluster1",
NotebookTask: &jobs.NotebookTask{},
},
},
},
},
{
name: "invalid python wheel task",
task: jobs.Task{
PythonWheelTask: &jobs.PythonWheelTask{},
TaskKey: "my_task",
},
errorPath: "resources.jobs.job1.tasks[0]",
errorDetail: `Task "my_task" requires a cluster or an environment to run.
Specify one of the following fields: job_cluster_key, environment_key, existing_cluster_id, new_cluster.`,
errorSummary: expectedSummary,
},
{
name: "invalid for each task",
task: jobs.Task{
ForEachTask: &jobs.ForEachTask{
Task: jobs.Task{
PythonWheelTask: &jobs.PythonWheelTask{},
TaskKey: "my_task",
},
},
},
errorPath: "resources.jobs.job1.tasks[0].for_each_task.task",
errorDetail: `Task "my_task" requires a cluster or an environment to run.
Specify one of the following fields: job_cluster_key, environment_key, existing_cluster_id, new_cluster.`,
errorSummary: expectedSummary,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
job := &resources.Job{
JobSettings: &jobs.JobSettings{
Tasks: []jobs.Task{tc.task},
},
}
b := createBundle(map[string]*resources.Job{"job1": job})
diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobTaskClusterSpec())
if tc.errorPath != "" || tc.errorDetail != "" || tc.errorSummary != "" {
assert.Len(t, diags, 1)
assert.Len(t, diags[0].Paths, 1)
diag := diags[0]
assert.Equal(t, tc.errorPath, diag.Paths[0].String())
assert.Equal(t, tc.errorSummary, diag.Summary)
assert.Equal(t, tc.errorDetail, diag.Detail)
} else {
assert.ElementsMatch(t, []string{}, diags)
}
})
}
}
func createBundle(jobs map[string]*resources.Job) *bundle.Bundle {
return &bundle.Bundle{
Config: config.Root{
Resources: config.Resources{
Jobs: jobs,
},
},
}
}

View File

@ -3,7 +3,6 @@ package validate
import (
"context"
"fmt"
"slices"
"sort"
"github.com/databricks/cli/bundle"
@ -66,10 +65,7 @@ func (m *uniqueResourceKeys) Apply(ctx context.Context, b *bundle.Bundle) diag.D
}
}
// dyn.Path under the hood is a slice. The code that walks the configuration
// tree uses the same underlying slice to track the path as it walks
// the tree. So, we need to clone it here.
m.paths = append(m.paths, slices.Clone(p))
m.paths = append(m.paths, p)
m.locations = append(m.locations, v.Locations()...)
resourceMetadata[k] = m
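
The removed comment described a real Go pitfall: if a tree walker reuses one backing slice for the path, every stored path aliases the same memory unless it is cloned. Dropping the clone implies the walker no longer reuses the slice (my reading; the diff itself does not say why). A self-contained demonstration of the aliasing the old slices.Clone guarded against:

package main

import (
	"fmt"
	"slices"
)

func main() {
	// A walker that mutates one shared path slice in place.
	path := []string{"resources", "jobs", ""}

	var aliased, cloned [][]string
	for _, key := range []string{"job_a", "job_b"} {
		path[2] = key
		aliased = append(aliased, path)             // every entry shares the backing array
		cloned = append(cloned, slices.Clone(path)) // every entry owns its data
	}

	fmt.Println(aliased) // [[resources jobs job_b] [resources jobs job_b]] - both mutated!
	fmt.Println(cloned)  // [[resources jobs job_a] [resources jobs job_b]]
}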

View File

@ -34,6 +34,7 @@ func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
JobClusterKeyDefined(),
FilesToSync(),
ValidateSyncPatterns(),
JobTaskClusterSpec(),
))
}

View File

@ -220,6 +220,10 @@ type resolvers struct {
func allResolvers() *resolvers {
r := &resolvers{}
r.Alert = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["Alert"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.Alerts.GetByDisplayName(ctx, name)
if err != nil {
return "", err
@ -228,6 +232,10 @@ func allResolvers() *resolvers {
return fmt.Sprint(entity.Id), nil
}
r.ClusterPolicy = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["ClusterPolicy"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.ClusterPolicies.GetByName(ctx, name)
if err != nil {
return "", err
@ -236,6 +244,10 @@ func allResolvers() *resolvers {
return fmt.Sprint(entity.PolicyId), nil
}
r.Cluster = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["Cluster"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.Clusters.GetByClusterName(ctx, name)
if err != nil {
return "", err
@ -244,6 +256,10 @@ func allResolvers() *resolvers {
return fmt.Sprint(entity.ClusterId), nil
}
r.Dashboard = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["Dashboard"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.Dashboards.GetByName(ctx, name)
if err != nil {
return "", err
@ -252,6 +268,10 @@ func allResolvers() *resolvers {
return fmt.Sprint(entity.Id), nil
}
r.InstancePool = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["InstancePool"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.InstancePools.GetByInstancePoolName(ctx, name)
if err != nil {
return "", err
@ -260,6 +280,10 @@ func allResolvers() *resolvers {
return fmt.Sprint(entity.InstancePoolId), nil
}
r.Job = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["Job"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.Jobs.GetBySettingsName(ctx, name)
if err != nil {
return "", err
@ -268,6 +292,10 @@ func allResolvers() *resolvers {
return fmt.Sprint(entity.JobId), nil
}
r.Metastore = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["Metastore"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.Metastores.GetByName(ctx, name)
if err != nil {
return "", err
@ -276,6 +304,10 @@ func allResolvers() *resolvers {
return fmt.Sprint(entity.MetastoreId), nil
}
r.Pipeline = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["Pipeline"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.Pipelines.GetByName(ctx, name)
if err != nil {
return "", err
@ -284,6 +316,10 @@ func allResolvers() *resolvers {
return fmt.Sprint(entity.PipelineId), nil
}
r.Query = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["Query"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.Queries.GetByDisplayName(ctx, name)
if err != nil {
return "", err
@ -292,6 +328,10 @@ func allResolvers() *resolvers {
return fmt.Sprint(entity.Id), nil
}
r.ServicePrincipal = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["ServicePrincipal"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.ServicePrincipals.GetByDisplayName(ctx, name)
if err != nil {
return "", err
@ -300,6 +340,10 @@ func allResolvers() *resolvers {
return fmt.Sprint(entity.ApplicationId), nil
}
r.Warehouse = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
fn, ok := lookupOverrides["Warehouse"]
if ok {
return fn(ctx, w, name)
}
entity, err := w.Warehouses.GetByName(ctx, name)
if err != nil {
return "", err

View File

@ -0,0 +1,41 @@
package variable
import (
"context"
"fmt"
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/service/compute"
)
var lookupOverrides = map[string]resolverFunc{
"Cluster": resolveCluster,
}
// We added a custom resolver for clusters to filter by cluster source when listing all clusters.
// Without this filtering, listing could take a very long time (5-10 minutes), which leads to lookup timeouts.
func resolveCluster(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
result, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{
FilterBy: &compute.ListClustersFilterBy{
ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi},
},
})
if err != nil {
return "", err
}
tmp := map[string][]compute.ClusterDetails{}
for _, v := range result {
key := v.ClusterName
tmp[key] = append(tmp[key], v)
}
alternatives, ok := tmp[name]
if !ok || len(alternatives) == 0 {
return "", fmt.Errorf("cluster named '%s' does not exist", name)
}
if len(alternatives) > 1 {
return "", fmt.Errorf("there are %d instances of clusters named '%s'", len(alternatives), name)
}
return alternatives[0].ClusterId, nil
}
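
The grouping step matters because cluster names are not unique in a workspace; the resolver succeeds only on exactly one match. The same group-then-require-unique pattern in isolation (illustrative types, not the SDK's):

package main

import "fmt"

type cluster struct {
	Name string
	ID   string
}

// resolveByName returns the ID of the uniquely named cluster, mirroring
// the zero/one/many handling in resolveCluster above.
func resolveByName(all []cluster, name string) (string, error) {
	byName := map[string][]cluster{}
	for _, c := range all {
		byName[c.Name] = append(byName[c.Name], c)
	}
	matches := byName[name]
	switch {
	case len(matches) == 0:
		return "", fmt.Errorf("cluster named '%s' does not exist", name)
	case len(matches) > 1:
		return "", fmt.Errorf("there are %d instances of clusters named '%s'", len(matches), name)
	}
	return matches[0].ID, nil
}

func main() {
	all := []cluster{{Name: "etl", ID: "c-1"}, {Name: "dev", ID: "c-2"}, {Name: "dev", ID: "c-3"}}
	fmt.Println(resolveByName(all, "etl")) // c-1 <nil>
	fmt.Println(resolveByName(all, "dev")) // error: 2 instances
}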

View File

@ -16,6 +16,11 @@ const (
VariableTypeComplex VariableType = "complex"
)
// We alias it here to override the JSON schema associated with a variable value
// in a target override. This is because we allow directly specifying the value,
// in addition to the variable.Variable struct format, in a target override.
type TargetVariable Variable
// An input variable for the bundle config
type Variable struct {
// A type of the variable. This is used to validate the value of the variable
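
In Go, "type TargetVariable Variable" declares a distinct named type with identical fields, so a reflection-driven schema generator can key on the type identity while JSON (un)marshalling behaves the same. A minimal illustration using plain encoding/json (not the CLI's schema machinery; the fields shown are a reduced stand-in):

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

type Variable struct {
	Default any    `json:"default,omitempty"`
	Lookup  string `json:"lookup,omitempty"`
}

// Distinct type, same layout: schema generation can treat it differently.
type TargetVariable Variable

func main() {
	v := TargetVariable{Lookup: "my-cluster"}
	out, _ := json.Marshal(v)
	fmt.Println(string(out)) // {"lookup":"my-cluster"} - tags carry over
	fmt.Println(reflect.TypeOf(v) == reflect.TypeOf(Variable{})) // false: distinct types
}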

View File

@ -47,13 +47,18 @@ type Workspace struct {
// Remote workspace base path for deployment state, for artifacts, as synchronization target.
// This defaults to "~/.bundle/${bundle.name}/${bundle.target}" where "~" expands to
// the current user's home directory in the workspace (e.g. `/Users/jane@doe.com`).
// the current user's home directory in the workspace (e.g. `/Workspace/Users/jane@doe.com`).
RootPath string `json:"root_path,omitempty"`
// Remote workspace path to synchronize local files to.
// This defaults to "${workspace.root}/files".
FilePath string `json:"file_path,omitempty"`
// Remote workspace path for resources with a presence in the workspace.
// These are kept outside [FilePath] to avoid potential naming collisions.
// This defaults to "${workspace.root}/resources".
ResourcePath string `json:"resource_path,omitempty"`
// Remote workspace path for build artifacts.
// This defaults to "${workspace.root}/artifacts".
ArtifactPath string `json:"artifact_path,omitempty"`
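
All three derived paths hang off root_path. A sketch of how such defaults compose (an illustrative helper; not the actual mutator that applies these defaults):

package main

import "fmt"

type Workspace struct {
	RootPath     string
	FilePath     string
	ResourcePath string
	ArtifactPath string
}

// applyDefaults fills the derived paths from RootPath when unset,
// mirroring the documented "${workspace.root}/..." defaults.
func applyDefaults(w *Workspace) {
	if w.FilePath == "" {
		w.FilePath = w.RootPath + "/files"
	}
	if w.ResourcePath == "" {
		w.ResourcePath = w.RootPath + "/resources"
	}
	if w.ArtifactPath == "" {
		w.ArtifactPath = w.RootPath + "/artifacts"
	}
}

func main() {
	w := Workspace{RootPath: "/Workspace/Users/jane@doe.com/.bundle/my_bundle/dev"}
	applyDefaults(&w)
	fmt.Println(w.FilePath)     // .../files
	fmt.Println(w.ResourcePath) // .../resources
	fmt.Println(w.ArtifactPath) // .../artifacts
}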

View File

@ -2,15 +2,21 @@ package files
import (
"context"
"errors"
"fmt"
"io/fs"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/permissions"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/log"
"github.com/databricks/cli/libs/sync"
)
type upload struct{}
type upload struct {
outputHandler sync.OutputHandler
}
func (m *upload) Name() string {
return "files.Upload"
@ -18,13 +24,23 @@ func (m *upload) Name() string {
func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
cmdio.LogString(ctx, fmt.Sprintf("Uploading bundle files to %s...", b.Config.Workspace.FilePath))
sync, err := GetSync(ctx, bundle.ReadOnly(b))
opts, err := GetSyncOptions(ctx, bundle.ReadOnly(b))
if err != nil {
return diag.FromErr(err)
}
opts.OutputHandler = m.outputHandler
sync, err := sync.New(ctx, *opts)
if err != nil {
return diag.FromErr(err)
}
defer sync.Close()
b.Files, err = sync.RunOnce(ctx)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
return permissions.ReportPossiblePermissionDenied(ctx, b, b.Config.Workspace.FilePath)
}
return diag.FromErr(err)
}
@ -32,6 +48,6 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
return nil
}
func Upload() bundle.Mutator {
return &upload{}
func Upload(outputHandler sync.OutputHandler) bundle.Mutator {
return &upload{outputHandler}
}
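
The constructor change is a small dependency injection: the caller decides where sync progress goes instead of the mutator hard-coding it. A self-contained sketch of the pattern (illustrative types, not the libs/sync API):

package main

import "fmt"

// OutputHandler consumes progress events; nil means "discard".
type OutputHandler func(event string)

type upload struct {
	outputHandler OutputHandler
}

func (m *upload) Apply() {
	if m.outputHandler != nil {
		m.outputHandler("uploaded 3 files")
	}
}

// Upload lets the caller inject the handler, as the mutator above now does.
func Upload(h OutputHandler) *upload {
	return &upload{outputHandler: h}
}

func main() {
	Upload(func(e string) { fmt.Println("sync:", e) }).Apply() // sync: uploaded 3 files
	Upload(nil).Apply()                                        // quiet
}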

View File

@ -3,8 +3,10 @@ package lock
import (
"context"
"errors"
"io/fs"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/permissions"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/filer"
"github.com/databricks/cli/libs/locker"
@ -51,12 +53,17 @@ func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
if err != nil {
log.Errorf(ctx, "Failed to acquire deployment lock: %v", err)
if errors.Is(err, fs.ErrPermission) {
return permissions.ReportPossiblePermissionDenied(ctx, b, b.Config.Workspace.StatePath)
}
notExistsError := filer.NoSuchDirectoryError{}
if errors.As(err, &notExistsError) {
// If we get a "doesn't exist" error from the API this indicates
// we either don't have permissions or the path is invalid.
return diag.Errorf("cannot write to deployment root (this can indicate a previous deploy was done with a different identity): %s", b.Config.Workspace.RootPath)
return permissions.ReportPossiblePermissionDenied(ctx, b, b.Config.Workspace.StatePath)
}
return diag.FromErr(err)
}

View File

@ -40,7 +40,7 @@ func (m *compute) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
// Compute config file path the job is defined in, relative to the bundle
// root
l := b.Config.GetLocation("resources.jobs." + name)
relativePath, err := filepath.Rel(b.RootPath, l.File)
relativePath, err := filepath.Rel(b.BundleRootPath, l.File)
if err != nil {
return diag.Errorf("failed to compute relative path for job %s: %v", name, err)
}

View File

@ -9,6 +9,7 @@ import (
"github.com/databricks/cli/bundle/config/resources"
"github.com/databricks/cli/bundle/internal/bundletest"
"github.com/databricks/cli/bundle/metadata"
"github.com/databricks/cli/libs/dyn"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -55,9 +56,9 @@ func TestComputeMetadataMutator(t *testing.T) {
},
}
bundletest.SetLocation(b, "resources.jobs.my-job-1", "a/b/c")
bundletest.SetLocation(b, "resources.jobs.my-job-2", "d/e/f")
bundletest.SetLocation(b, "resources.pipelines.my-pipeline", "abc")
bundletest.SetLocation(b, "resources.jobs.my-job-1", []dyn.Location{{File: "a/b/c"}})
bundletest.SetLocation(b, "resources.jobs.my-job-2", []dyn.Location{{File: "d/e/f"}})
bundletest.SetLocation(b, "resources.pipelines.my-pipeline", []dyn.Location{{File: "abc"}})
expectedMetadata := metadata.Metadata{
Version: metadata.Version,

View File

@ -62,7 +62,7 @@ func testStatePull(t *testing.T, opts statePullOpts) {
tmpDir := t.TempDir()
b := &bundle.Bundle{
RootPath: tmpDir,
BundleRootPath: tmpDir,
BundleRoot: vfs.MustNew(tmpDir),
SyncRootPath: tmpDir,
@ -259,7 +259,7 @@ func TestStatePullNoState(t *testing.T) {
}}
b := &bundle.Bundle{
RootPath: t.TempDir(),
BundleRootPath: t.TempDir(),
Config: config.Root{
Bundle: config.Bundle{
Target: "default",
@ -447,7 +447,7 @@ func TestStatePullNewerDeploymentStateVersion(t *testing.T) {
}}
b := &bundle.Bundle{
RootPath: t.TempDir(),
BundleRootPath: t.TempDir(),
Config: config.Root{
Bundle: config.Bundle{
Target: "default",

View File

@ -10,6 +10,8 @@ import (
"github.com/databricks/cli/libs/log"
)
const MaxStateFileSize = 10 * 1024 * 1024 // 10MB
type statePush struct {
filerFactory FilerFactory
}
@ -35,6 +37,17 @@ func (s *statePush) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
}
defer local.Close()
if !b.Config.Bundle.Force {
state, err := local.Stat()
if err != nil {
return diag.FromErr(err)
}
if state.Size() > MaxStateFileSize {
return diag.Errorf("Deployment state file size exceeds the maximum allowed size of %d bytes. Please reduce the number of resources in your bundle, split your bundle into multiple or re-run the command with --force flag.", MaxStateFileSize)
}
}
log.Infof(ctx, "Writing local deployment state file to remote state directory")
err = f.Write(ctx, DeploymentStateFileName, local, filer.CreateParentDirectories, filer.OverwriteIfExists)
if err != nil {

View File

@ -45,7 +45,7 @@ func TestStatePush(t *testing.T) {
}}
b := &bundle.Bundle{
RootPath: t.TempDir(),
BundleRootPath: t.TempDir(),
Config: config.Root{
Bundle: config.Bundle{
Target: "default",

View File

@ -27,7 +27,7 @@ func setupBundleForStateUpdate(t *testing.T) *bundle.Bundle {
require.NoError(t, err)
return &bundle.Bundle{
RootPath: tmpDir,
BundleRootPath: tmpDir,
Config: config.Root{
Bundle: config.Bundle{
Target: "default",

View File

@ -4,6 +4,7 @@ import (
"context"
"github.com/databricks/cli/bundle"
"github.com/databricks/cli/bundle/permissions"
"github.com/databricks/cli/libs/diag"
"github.com/databricks/cli/libs/log"
"github.com/hashicorp/terraform-exec/tfexec"
@ -34,6 +35,10 @@ func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
// Apply terraform according to the computed plan
err := tf.Apply(ctx, tfexec.DirOrPlan(b.Plan.Path))
if err != nil {
diags := permissions.TryExtendTerraformPermissionError(ctx, b, err)
if diags != nil {
return diags
}
return diag.Errorf("terraform apply: %v", err)
}

Some files were not shown because too many files have changed in this diff.