mirror of https://github.com/databricks/cli.git
Merge remote-tracking branch 'databricks/main' into cp-summary-with-urls
commit 7911c672ba

@@ -1 +1 @@
-d05898328669a3f8ab0c2ecee37db2673d3ea3f7
+6f6b1371e640f2dfeba72d365ac566368656f6b6
@@ -6,6 +6,7 @@ cmd/account/cmd.go linguist-generated=true
 cmd/account/credentials/credentials.go linguist-generated=true
 cmd/account/csp-enablement-account/csp-enablement-account.go linguist-generated=true
 cmd/account/custom-app-integration/custom-app-integration.go linguist-generated=true
+cmd/account/disable-legacy-features/disable-legacy-features.go linguist-generated=true
 cmd/account/encryption-keys/encryption-keys.go linguist-generated=true
 cmd/account/esm-enablement-account/esm-enablement-account.go linguist-generated=true
 cmd/account/groups/groups.go linguist-generated=true
@@ -52,6 +53,7 @@ cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true
 cmd/workspace/dashboards/dashboards.go linguist-generated=true
 cmd/workspace/data-sources/data-sources.go linguist-generated=true
 cmd/workspace/default-namespace/default-namespace.go linguist-generated=true
+cmd/workspace/disable-legacy-access/disable-legacy-access.go linguist-generated=true
 cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go linguist-generated=true
 cmd/workspace/experiments/experiments.go linguist-generated=true
 cmd/workspace/external-locations/external-locations.go linguist-generated=true
@@ -108,6 +110,7 @@ cmd/workspace/storage-credentials/storage-credentials.go linguist-generated=true
 cmd/workspace/system-schemas/system-schemas.go linguist-generated=true
 cmd/workspace/table-constraints/table-constraints.go linguist-generated=true
 cmd/workspace/tables/tables.go linguist-generated=true
+cmd/workspace/temporary-table-credentials/temporary-table-credentials.go linguist-generated=true
 cmd/workspace/token-management/token-management.go linguist-generated=true
 cmd/workspace/tokens/tokens.go linguist-generated=true
 cmd/workspace/users/users.go linguist-generated=true
@@ -33,7 +33,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.x
+          go-version: 1.22.7

       - name: Setup Python
         uses: actions/setup-python@v5
@@ -68,7 +68,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.x
+          go-version: 1.22.7

           # No need to download cached dependencies when running gofmt.
           cache: false
@@ -100,7 +100,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.x
+          go-version: 1.22.7

       # Github repo: https://github.com/ajv-validator/ajv-cli
       - name: Install ajv-cli
@@ -21,7 +21,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.x
+          go-version: 1.22.7

           # The default cache key for this action considers only the `go.sum` file.
           # We include .goreleaser.yaml here to differentiate from the cache used by the push action
@@ -22,7 +22,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.x
+          go-version: 1.22.7

           # The default cache key for this action considers only the `go.sum` file.
           # We include .goreleaser.yaml here to differentiate from the cache used by the push action
CHANGELOG.md
@@ -1,5 +1,72 @@
 # Version changelog

+## [Release] Release v0.229.0
+
+Bundles:
+* Added support for creating all-purpose clusters ([#1698](https://github.com/databricks/cli/pull/1698)).
+* Reduce time until the prompt is shown for bundle run ([#1727](https://github.com/databricks/cli/pull/1727)).
+* Use Unity Catalog for pipelines in the default-python template ([#1766](https://github.com/databricks/cli/pull/1766)).
+* Add verbose flag to the "bundle deploy" command ([#1774](https://github.com/databricks/cli/pull/1774)).
+* Fixed full variable override detection ([#1787](https://github.com/databricks/cli/pull/1787)).
+* Add sub-extension to resource files in built-in templates ([#1777](https://github.com/databricks/cli/pull/1777)).
+* Fix panic in `apply_presets.go` ([#1796](https://github.com/databricks/cli/pull/1796)).
+
+Internal:
+* Assert tokens are redacted in origin URL when username is not specified ([#1785](https://github.com/databricks/cli/pull/1785)).
+* Refactor jobs path translation ([#1782](https://github.com/databricks/cli/pull/1782)).
+* Add JobTaskClusterSpec validate mutator ([#1784](https://github.com/databricks/cli/pull/1784)).
+* Pin Go toolchain to 1.22.7 ([#1790](https://github.com/databricks/cli/pull/1790)).
+* Modify SetLocation test utility to take full locations as argument ([#1788](https://github.com/databricks/cli/pull/1788)).
+* Simplified isFullVariableOverrideDef implementation ([#1791](https://github.com/databricks/cli/pull/1791)).
+* Sort tasks by `task_key` before generating the Terraform configuration ([#1776](https://github.com/databricks/cli/pull/1776)).
+* Trim trailing whitespace ([#1794](https://github.com/databricks/cli/pull/1794)).
+* Move trampoline code into trampoline package ([#1793](https://github.com/databricks/cli/pull/1793)).
+* Rename `RootPath` -> `BundleRootPath` ([#1792](https://github.com/databricks/cli/pull/1792)).
+
+API Changes:
+* Changed `databricks apps delete` command to return .
+* Changed `databricks apps deploy` command with new required argument order.
+* Changed `databricks apps start` command to return .
+* Changed `databricks apps stop` command to return .
+* Added `databricks temporary-table-credentials` command group.
+* Added `databricks serving-endpoints put-ai-gateway` command.
+* Added `databricks disable-legacy-access` command group.
+* Added `databricks account disable-legacy-features` command group.
+
+OpenAPI commit 6f6b1371e640f2dfeba72d365ac566368656f6b6 (2024-09-19)
+Dependency updates:
+* Upgrade to Go SDK 0.47.0 ([#1799](https://github.com/databricks/cli/pull/1799)).
+* Upgrade to TF provider 1.52 ([#1781](https://github.com/databricks/cli/pull/1781)).
+* Bump golang.org/x/mod from 0.20.0 to 0.21.0 ([#1758](https://github.com/databricks/cli/pull/1758)).
+* Bump github.com/hashicorp/hc-install from 0.7.0 to 0.9.0 ([#1772](https://github.com/databricks/cli/pull/1772)).
+
+## [Release] Release v0.228.1
+
+Bundles:
+* Added listing cluster filtering for cluster lookups ([#1754](https://github.com/databricks/cli/pull/1754)).
+* Expand library globs relative to the sync root ([#1756](https://github.com/databricks/cli/pull/1756)).
+* Fixed generated YAML missing 'default' for empty values ([#1765](https://github.com/databricks/cli/pull/1765)).
+* Use periodic triggers in all templates ([#1739](https://github.com/databricks/cli/pull/1739)).
+* Use the friendly name of service principals when shortening their name ([#1770](https://github.com/databricks/cli/pull/1770)).
+* Fixed detecting full syntax variable override which includes type field ([#1775](https://github.com/databricks/cli/pull/1775)).
+
+Internal:
+* Pass copy of `dyn.Path` to callback function ([#1747](https://github.com/databricks/cli/pull/1747)).
+* Make bundle JSON schema modular with `` ([#1700](https://github.com/databricks/cli/pull/1700)).
+* Alias variables block in the `Target` struct ([#1748](https://github.com/databricks/cli/pull/1748)).
+* Add end to end integration tests for bundle JSON schema ([#1726](https://github.com/databricks/cli/pull/1726)).
+* Fix artifact upload integration tests ([#1767](https://github.com/databricks/cli/pull/1767)).
+
+API Changes:
+* Added `databricks quality-monitors regenerate-dashboard` command.
+
+OpenAPI commit d05898328669a3f8ab0c2ecee37db2673d3ea3f7 (2024-09-04)
+Dependency updates:
+* Bump golang.org/x/term from 0.23.0 to 0.24.0 ([#1757](https://github.com/databricks/cli/pull/1757)).
+* Bump golang.org/x/oauth2 from 0.22.0 to 0.23.0 ([#1761](https://github.com/databricks/cli/pull/1761)).
+* Bump golang.org/x/text from 0.17.0 to 0.18.0 ([#1759](https://github.com/databricks/cli/pull/1759)).
+* Bump github.com/databricks/databricks-sdk-go from 0.45.0 to 0.46.0 ([#1760](https://github.com/databricks/cli/pull/1760)).
+
 ## [Release] Release v0.228.0

 CLI:
@@ -10,6 +10,7 @@ import (
     "github.com/databricks/cli/bundle/config"
     "github.com/databricks/cli/bundle/internal/bundletest"
     "github.com/databricks/cli/internal/testutil"
+    "github.com/databricks/cli/libs/dyn"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )
@@ -23,7 +24,7 @@ func TestExpandGlobs_Nominal(t *testing.T) {
     testutil.Touch(t, tmpDir, "bc.txt")

     b := &bundle.Bundle{
-        RootPath: tmpDir,
+        BundleRootPath: tmpDir,
         Config: config.Root{
             Artifacts: config.Artifacts{
                 "test": {
@@ -36,7 +37,7 @@ func TestExpandGlobs_Nominal(t *testing.T) {
         },
     }

-    bundletest.SetLocation(b, "artifacts", filepath.Join(tmpDir, "databricks.yml"))
+    bundletest.SetLocation(b, "artifacts", []dyn.Location{{File: filepath.Join(tmpDir, "databricks.yml")}})

     ctx := context.Background()
     diags := bundle.Apply(ctx, b, bundle.Seq(
@@ -62,7 +63,7 @@ func TestExpandGlobs_InvalidPattern(t *testing.T) {
     tmpDir := t.TempDir()

     b := &bundle.Bundle{
-        RootPath: tmpDir,
+        BundleRootPath: tmpDir,
         Config: config.Root{
             Artifacts: config.Artifacts{
                 "test": {
@@ -77,7 +78,7 @@ func TestExpandGlobs_InvalidPattern(t *testing.T) {
         },
     }

-    bundletest.SetLocation(b, "artifacts", filepath.Join(tmpDir, "databricks.yml"))
+    bundletest.SetLocation(b, "artifacts", []dyn.Location{{File: filepath.Join(tmpDir, "databricks.yml")}})

     ctx := context.Background()
     diags := bundle.Apply(ctx, b, bundle.Seq(
@@ -110,7 +111,7 @@ func TestExpandGlobs_NoMatches(t *testing.T) {
     testutil.Touch(t, tmpDir, "b2.txt")

     b := &bundle.Bundle{
-        RootPath: tmpDir,
+        BundleRootPath: tmpDir,
         Config: config.Root{
             Artifacts: config.Artifacts{
                 "test": {
@@ -125,7 +126,7 @@ func TestExpandGlobs_NoMatches(t *testing.T) {
         },
     }

-    bundletest.SetLocation(b, "artifacts", filepath.Join(tmpDir, "databricks.yml"))
+    bundletest.SetLocation(b, "artifacts", []dyn.Location{{File: filepath.Join(tmpDir, "databricks.yml")}})

     ctx := context.Background()
     diags := bundle.Apply(ctx, b, bundle.Seq(
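Note: every test hunk above switches to the updated bundletest.SetLocation helper, which now takes a slice of dyn.Location values instead of a bare file path (see "Modify SetLocation test utility to take full locations as argument" in the changelog). Below is a minimal sketch of the new call shape using only what the hunks show; the test name and the standalone-snippet framing are illustrative and not part of this commit.

package artifacts_sketch_test

import (
    "path/filepath"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/internal/bundletest"
    "github.com/databricks/cli/libs/dyn"
)

func TestSetLocationSketch(t *testing.T) {
    tmpDir := t.TempDir()
    b := &bundle.Bundle{BundleRootPath: tmpDir}

    // The third argument is a slice, so a configuration path can be attributed
    // to one or more source locations; each location records the file it came from.
    bundletest.SetLocation(b, "artifacts", []dyn.Location{
        {File: filepath.Join(tmpDir, "databricks.yml")},
    })
}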
@@ -47,7 +47,7 @@ func (m *prepare) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics

     // If artifact path is not provided, use bundle root dir
     if artifact.Path == "" {
-        artifact.Path = b.RootPath
+        artifact.Path = b.BundleRootPath
     }

     if !filepath.IsAbs(artifact.Path) {
@@ -35,21 +35,21 @@ func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic
     log.Infof(ctx, "Detecting Python wheel project...")

     // checking if there is setup.py in the bundle root
-    setupPy := filepath.Join(b.RootPath, "setup.py")
+    setupPy := filepath.Join(b.BundleRootPath, "setup.py")
     _, err := os.Stat(setupPy)
     if err != nil {
         log.Infof(ctx, "No Python wheel project found at bundle root folder")
         return nil
     }

-    log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.RootPath))
+    log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.BundleRootPath))
     module := extractModuleName(setupPy)

     if b.Config.Artifacts == nil {
         b.Config.Artifacts = make(map[string]*config.Artifact)
     }

-    pkgPath, err := filepath.Abs(b.RootPath)
+    pkgPath, err := filepath.Abs(b.BundleRootPath)
     if err != nil {
         return diag.FromErr(err)
     }
@@ -31,22 +31,26 @@ import (
 const internalFolder = ".internal"

 type Bundle struct {
-    // RootPath contains the directory path to the root of the bundle.
+    // BundleRootPath is the local path to the root directory of the bundle.
     // It is set when we instantiate a new bundle instance.
-    RootPath string
+    BundleRootPath string

-    // BundleRoot is a virtual filesystem path to the root of the bundle.
+    // BundleRoot is a virtual filesystem path to [BundleRootPath].
     // Exclusively use this field for filesystem operations.
     BundleRoot vfs.Path

-    // SyncRoot is a virtual filesystem path to the root directory of the files that are synchronized to the workspace.
-    // It can be an ancestor to [BundleRoot], but not a descendant; that is, [SyncRoot] must contain [BundleRoot].
-    SyncRoot vfs.Path
-
     // SyncRootPath is the local path to the root directory of files that are synchronized to the workspace.
-    // It is equal to `SyncRoot.Native()` and included as dedicated field for convenient access.
+    // By default, it is the same as [BundleRootPath].
+    // If it is different, it must be an ancestor to [BundleRootPath].
+    // That is, [SyncRootPath] must contain [BundleRootPath].
     SyncRootPath string

+    // SyncRoot is a virtual filesystem path to [SyncRootPath].
+    // Exclusively use this field for filesystem operations.
+    SyncRoot vfs.Path
+
+    // Config contains the bundle configuration.
+    // It is loaded from the bundle configuration files and mutators may update it.
     Config config.Root

     // Metadata about the bundle deployment. This is the interface Databricks services
@@ -84,14 +88,14 @@ type Bundle struct {

 func Load(ctx context.Context, path string) (*Bundle, error) {
     b := &Bundle{
-        RootPath:   filepath.Clean(path),
+        BundleRootPath: filepath.Clean(path),
         BundleRoot: vfs.MustNew(path),
     }
     configFile, err := config.FileNames.FindInPath(path)
     if err != nil {
         return nil, err
     }
-    log.Debugf(ctx, "Found bundle root at %s (file %s)", b.RootPath, configFile)
+    log.Debugf(ctx, "Found bundle root at %s (file %s)", b.BundleRootPath, configFile)
     return b, nil
 }

@@ -160,7 +164,7 @@ func (b *Bundle) CacheDir(ctx context.Context, paths ...string) (string, error)
     if !exists || cacheDirName == "" {
         cacheDirName = filepath.Join(
             // Anchor at bundle root directory.
-            b.RootPath,
+            b.BundleRootPath,
             // Static cache directory.
             ".databricks",
             "bundle",
@@ -212,7 +216,7 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) {
     if err != nil {
         return nil, err
     }
-    internalDirRel, err := filepath.Rel(b.RootPath, internalDir)
+    internalDirRel, err := filepath.Rel(b.BundleRootPath, internalDir)
     if err != nil {
         return nil, err
     }
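Note: the reworked doc comments above pin down the relationship between the two local paths: SyncRootPath defaults to BundleRootPath and otherwise must be an ancestor of it, i.e. SyncRootPath must contain BundleRootPath. A small self-contained sketch of that containment rule, using only the standard library; the helper name is illustrative and not part of this commit.

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// syncRootContainsBundleRoot reports whether syncRootPath contains (or equals)
// bundleRootPath, which is the invariant documented on the Bundle struct above.
func syncRootContainsBundleRoot(syncRootPath, bundleRootPath string) (bool, error) {
    rel, err := filepath.Rel(syncRootPath, bundleRootPath)
    if err != nil {
        return false, err
    }
    // A bundle root that escapes the sync root yields a relative path starting with "..".
    return rel != ".." && !strings.HasPrefix(rel, ".."+string(filepath.Separator)), nil
}

func main() {
    ok, _ := syncRootContainsBundleRoot("/repo", "/repo/bundles/app")  // true
    bad, _ := syncRootContainsBundleRoot("/repo/bundles/app", "/repo") // false
    fmt.Println(ok, bad)
}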
@@ -21,7 +21,7 @@ func (r ReadOnlyBundle) Config() config.Root {
 }

 func (r ReadOnlyBundle) RootPath() string {
-    return r.b.RootPath
+    return r.b.BundleRootPath
 }

 func (r ReadOnlyBundle) BundleRoot() vfs.Path {
@@ -79,7 +79,7 @@ func TestBundleMustLoadSuccess(t *testing.T) {
     t.Setenv(env.RootVariable, "./tests/basic")
     b, err := MustLoad(context.Background())
     require.NoError(t, err)
-    assert.Equal(t, "tests/basic", filepath.ToSlash(b.RootPath))
+    assert.Equal(t, "tests/basic", filepath.ToSlash(b.BundleRootPath))
 }

 func TestBundleMustLoadFailureWithEnv(t *testing.T) {
@@ -98,7 +98,7 @@ func TestBundleTryLoadSuccess(t *testing.T) {
     t.Setenv(env.RootVariable, "./tests/basic")
     b, err := TryLoad(context.Background())
     require.NoError(t, err)
-    assert.Equal(t, "tests/basic", filepath.ToSlash(b.RootPath))
+    assert.Equal(t, "tests/basic", filepath.ToSlash(b.BundleRootPath))
 }

 func TestBundleTryLoadFailureWithEnv(t *testing.T) {
@@ -38,8 +38,11 @@ type Bundle struct {
     // Annotated readonly as this should be set at the target level.
     Mode Mode `json:"mode,omitempty" bundle:"readonly"`

-    // Overrides the compute used for jobs and other supported assets.
-    ComputeID string `json:"compute_id,omitempty"`
+    // DEPRECATED: Overrides the compute used for jobs and other supported assets.
+    ComputeId string `json:"compute_id,omitempty"`
+
+    // Overrides the cluster used for jobs and other supported assets.
+    ClusterId string `json:"cluster_id,omitempty"`

     // Deployment section specifies deployment related configuration for bundle
     Deployment Deployment `json:"deployment,omitempty"`
@@ -20,7 +20,7 @@ func (m *entryPoint) Name() string {
 }

 func (m *entryPoint) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
-    path, err := config.FileNames.FindInPath(b.RootPath)
+    path, err := config.FileNames.FindInPath(b.BundleRootPath)
     if err != nil {
         return diag.FromErr(err)
     }
@@ -18,7 +18,7 @@ func TestEntryPointNoRootPath(t *testing.T) {

 func TestEntryPoint(t *testing.T) {
     b := &bundle.Bundle{
-        RootPath: "testdata",
+        BundleRootPath: "testdata",
     }
     diags := bundle.Apply(context.Background(), b, loader.EntryPoint())
     require.NoError(t, diags.Error())
@@ -14,7 +14,7 @@ import (

 func TestProcessInclude(t *testing.T) {
     b := &bundle.Bundle{
-        RootPath: "testdata",
+        BundleRootPath: "testdata",
         Config: config.Root{
             Workspace: config.Workspace{
                 Host: "foo",
@@ -22,7 +22,7 @@ func TestProcessInclude(t *testing.T) {
         },
     }

-    m := loader.ProcessInclude(filepath.Join(b.RootPath, "host.yml"), "host.yml")
+    m := loader.ProcessInclude(filepath.Join(b.BundleRootPath, "host.yml"), "host.yml")
     assert.Equal(t, "ProcessInclude(host.yml)", m.Name())

     // Assert the host value prior to applying the mutator
@@ -47,7 +47,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.
         }

         // Anchor includes to the bundle root path.
-        matches, err := filepath.Glob(filepath.Join(b.RootPath, entry))
+        matches, err := filepath.Glob(filepath.Join(b.BundleRootPath, entry))
         if err != nil {
             return diag.FromErr(err)
         }
@@ -61,7 +61,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.
         // Filter matches to ones we haven't seen yet.
         var includes []string
         for _, match := range matches {
-            rel, err := filepath.Rel(b.RootPath, match)
+            rel, err := filepath.Rel(b.BundleRootPath, match)
             if err != nil {
                 return diag.FromErr(err)
             }
@@ -76,7 +76,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.
         slices.Sort(includes)
         files = append(files, includes...)
         for _, include := range includes {
-            out = append(out, ProcessInclude(filepath.Join(b.RootPath, include), include))
+            out = append(out, ProcessInclude(filepath.Join(b.BundleRootPath, include), include))
         }
     }

@@ -15,7 +15,7 @@ import (

 func TestProcessRootIncludesEmpty(t *testing.T) {
     b := &bundle.Bundle{
-        RootPath: ".",
+        BundleRootPath: ".",
     }
     diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
     require.NoError(t, diags.Error())
@@ -30,7 +30,7 @@ func TestProcessRootIncludesAbs(t *testing.T) {
     }

     b := &bundle.Bundle{
-        RootPath: ".",
+        BundleRootPath: ".",
         Config: config.Root{
             Include: []string{
                 "/tmp/*.yml",
@@ -44,7 +44,7 @@ func TestProcessRootIncludesAbs(t *testing.T) {

 func TestProcessRootIncludesSingleGlob(t *testing.T) {
     b := &bundle.Bundle{
-        RootPath: t.TempDir(),
+        BundleRootPath: t.TempDir(),
         Config: config.Root{
             Include: []string{
                 "*.yml",
@@ -52,9 +52,9 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) {
         },
     }

-    testutil.Touch(t, b.RootPath, "databricks.yml")
-    testutil.Touch(t, b.RootPath, "a.yml")
-    testutil.Touch(t, b.RootPath, "b.yml")
+    testutil.Touch(t, b.BundleRootPath, "databricks.yml")
+    testutil.Touch(t, b.BundleRootPath, "a.yml")
+    testutil.Touch(t, b.BundleRootPath, "b.yml")

     diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
     require.NoError(t, diags.Error())
@@ -63,7 +63,7 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) {

 func TestProcessRootIncludesMultiGlob(t *testing.T) {
     b := &bundle.Bundle{
-        RootPath: t.TempDir(),
+        BundleRootPath: t.TempDir(),
         Config: config.Root{
             Include: []string{
                 "a*.yml",
@@ -72,8 +72,8 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) {
         },
     }

-    testutil.Touch(t, b.RootPath, "a1.yml")
-    testutil.Touch(t, b.RootPath, "b1.yml")
+    testutil.Touch(t, b.BundleRootPath, "a1.yml")
+    testutil.Touch(t, b.BundleRootPath, "b1.yml")

     diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
     require.NoError(t, diags.Error())
@@ -82,7 +82,7 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) {

 func TestProcessRootIncludesRemoveDups(t *testing.T) {
     b := &bundle.Bundle{
-        RootPath: t.TempDir(),
+        BundleRootPath: t.TempDir(),
         Config: config.Root{
             Include: []string{
                 "*.yml",
@@ -91,7 +91,7 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) {
         },
     }

-    testutil.Touch(t, b.RootPath, "a.yml")
+    testutil.Touch(t, b.BundleRootPath, "a.yml")

     diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
     require.NoError(t, diags.Error())
@@ -100,7 +100,7 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) {

 func TestProcessRootIncludesNotExists(t *testing.T) {
     b := &bundle.Bundle{
-        RootPath: t.TempDir(),
+        BundleRootPath: t.TempDir(),
         Config: config.Root{
             Include: []string{
                 "notexist.yml",
@@ -35,8 +35,10 @@ func (m *applyPresets) Name() string {
 }

 func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+    var diags diag.Diagnostics
+
     if d := validatePauseStatus(b); d != nil {
-        return d
+        diags = diags.Extend(d)
     }

     r := b.Config.Resources
@@ -45,7 +47,11 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
     tags := toTagArray(t.Tags)

     // Jobs presets: Prefix, Tags, JobsMaxConcurrentRuns, TriggerPauseStatus
-    for _, j := range r.Jobs {
+    for key, j := range r.Jobs {
+        if j.JobSettings == nil {
+            diags = diags.Extend(diag.Errorf("job %s is not defined", key))
+            continue
+        }
         j.Name = prefix + j.Name
         if j.Tags == nil {
             j.Tags = make(map[string]string)
@@ -77,20 +83,27 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
     }

     // Pipelines presets: Prefix, PipelinesDevelopment
-    for i := range r.Pipelines {
-        r.Pipelines[i].Name = prefix + r.Pipelines[i].Name
+    for key, p := range r.Pipelines {
+        if p.PipelineSpec == nil {
+            diags = diags.Extend(diag.Errorf("pipeline %s is not defined", key))
+            continue
+        }
+        p.Name = prefix + p.Name
         if config.IsExplicitlyEnabled(t.PipelinesDevelopment) {
-            r.Pipelines[i].Development = true
+            p.Development = true
         }
         if t.TriggerPauseStatus == config.Paused {
-            r.Pipelines[i].Continuous = false
+            p.Continuous = false
         }

         // As of 2024-06, pipelines don't yet support tags
     }

     // Models presets: Prefix, Tags
-    for _, m := range r.Models {
+    for key, m := range r.Models {
+        if m.Model == nil {
+            diags = diags.Extend(diag.Errorf("model %s is not defined", key))
+            continue
+        }
         m.Name = prefix + m.Name
         for _, t := range tags {
             exists := slices.ContainsFunc(m.Tags, func(modelTag ml.ModelTag) bool {
@@ -104,7 +117,11 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
     }

     // Experiments presets: Prefix, Tags
-    for _, e := range r.Experiments {
+    for key, e := range r.Experiments {
+        if e.Experiment == nil {
+            diags = diags.Extend(diag.Errorf("experiment %s is not defined", key))
+            continue
+        }
         filepath := e.Name
         dir := path.Dir(filepath)
         base := path.Base(filepath)
@@ -128,39 +145,74 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos
     }

     // Model serving endpoint presets: Prefix
-    for i := range r.ModelServingEndpoints {
-        r.ModelServingEndpoints[i].Name = normalizePrefix(prefix) + r.ModelServingEndpoints[i].Name
+    for key, e := range r.ModelServingEndpoints {
+        if e.CreateServingEndpoint == nil {
+            diags = diags.Extend(diag.Errorf("model serving endpoint %s is not defined", key))
+            continue
+        }
+        e.Name = normalizePrefix(prefix) + e.Name

         // As of 2024-06, model serving endpoints don't yet support tags
     }

     // Registered models presets: Prefix
-    for i := range r.RegisteredModels {
-        r.RegisteredModels[i].Name = normalizePrefix(prefix) + r.RegisteredModels[i].Name
+    for key, m := range r.RegisteredModels {
+        if m.CreateRegisteredModelRequest == nil {
+            diags = diags.Extend(diag.Errorf("registered model %s is not defined", key))
+            continue
+        }
+        m.Name = normalizePrefix(prefix) + m.Name

         // As of 2024-06, registered models don't yet support tags
     }

-    // Quality monitors presets: Prefix
+    // Quality monitors presets: Schedule
     if t.TriggerPauseStatus == config.Paused {
-        for i := range r.QualityMonitors {
+        for key, q := range r.QualityMonitors {
+            if q.CreateMonitor == nil {
+                diags = diags.Extend(diag.Errorf("quality monitor %s is not defined", key))
+                continue
+            }
             // Remove all schedules from monitors, since they don't support pausing/unpausing.
             // Quality monitors might support the "pause" property in the future, so at the
             // CLI level we do respect that property if it is set to "unpaused."
-            if r.QualityMonitors[i].Schedule != nil && r.QualityMonitors[i].Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused {
-                r.QualityMonitors[i].Schedule = nil
+            if q.Schedule != nil && q.Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused {
+                q.Schedule = nil
             }
         }
     }

     // Schemas: Prefix
-    for i := range r.Schemas {
-        r.Schemas[i].Name = normalizePrefix(prefix) + r.Schemas[i].Name
+    for key, s := range r.Schemas {
+        if s.CreateSchema == nil {
+            diags = diags.Extend(diag.Errorf("schema %s is not defined", key))
+            continue
+        }
+        s.Name = normalizePrefix(prefix) + s.Name
         // HTTP API for schemas doesn't yet support tags. It's only supported in
         // the Databricks UI and via the SQL API.
     }

-    return nil
+    // Clusters: Prefix, Tags
+    for key, c := range r.Clusters {
+        if c.ClusterSpec == nil {
+            diags = diags.Extend(diag.Errorf("cluster %s is not defined", key))
+            continue
+        }
+        c.ClusterName = prefix + c.ClusterName
+        if c.CustomTags == nil {
+            c.CustomTags = make(map[string]string)
+        }
+        for _, tag := range tags {
+            normalisedKey := b.Tagging.NormalizeKey(tag.Key)
+            normalisedValue := b.Tagging.NormalizeValue(tag.Value)
+            if _, ok := c.CustomTags[normalisedKey]; !ok {
+                c.CustomTags[normalisedKey] = normalisedValue
+            }
+        }
+    }
+
+    return diags
 }

 func validatePauseStatus(b *bundle.Bundle) diag.Diagnostics {
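Note: the rewritten mutator above no longer returns on the first problem; it records one diagnostic per misconfigured resource and keeps going, returning the accumulated set at the end. Below is a reduced sketch of that accumulate-and-continue pattern using the diag helpers that appear in the hunk; the function and its map type are hypothetical, not part of this commit.

package example

import "github.com/databricks/cli/libs/diag"

// checkAll records a diagnostic for every entry whose spec is missing and keeps
// processing the remaining entries instead of returning early, mirroring the
// nil-spec guards added to applyPresets above.
func checkAll(specs map[string]*string) diag.Diagnostics {
    var diags diag.Diagnostics
    for key, spec := range specs {
        if spec == nil {
            diags = diags.Extend(diag.Errorf("resource %s is not defined", key))
            continue
        }
        _ = *spec // apply presets to the spec here
    }
    return diags
}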
@@ -251,3 +251,116 @@ func TestApplyPresetsJobsMaxConcurrentRuns(t *testing.T) {
         })
     }
 }
+
+func TestApplyPresetsPrefixWithoutJobSettings(t *testing.T) {
+    b := &bundle.Bundle{
+        Config: config.Root{
+            Resources: config.Resources{
+                Jobs: map[string]*resources.Job{
+                    "job1": {}, // no jobsettings inside
+                },
+            },
+            Presets: config.Presets{
+                NamePrefix: "prefix-",
+            },
+        },
+    }
+
+    ctx := context.Background()
+    diags := bundle.Apply(ctx, b, mutator.ApplyPresets())
+
+    require.ErrorContains(t, diags.Error(), "job job1 is not defined")
+}
+
+func TestApplyPresetsResourceNotDefined(t *testing.T) {
+    tests := []struct {
+        resources config.Resources
+        error     string
+    }{
+        {
+            resources: config.Resources{
+                Jobs: map[string]*resources.Job{
+                    "job1": {}, // no jobsettings inside
+                },
+            },
+            error: "job job1 is not defined",
+        },
+        {
+            resources: config.Resources{
+                Pipelines: map[string]*resources.Pipeline{
+                    "pipeline1": {}, // no pipelinespec inside
+                },
+            },
+            error: "pipeline pipeline1 is not defined",
+        },
+        {
+            resources: config.Resources{
+                Models: map[string]*resources.MlflowModel{
+                    "model1": {}, // no model inside
+                },
+            },
+            error: "model model1 is not defined",
+        },
+        {
+            resources: config.Resources{
+                Experiments: map[string]*resources.MlflowExperiment{
+                    "experiment1": {}, // no experiment inside
+                },
+            },
+            error: "experiment experiment1 is not defined",
+        },
+        {
+            resources: config.Resources{
+                ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{
+                    "endpoint1": {}, // no CreateServingEndpoint inside
+                },
+                RegisteredModels: map[string]*resources.RegisteredModel{
+                    "model1": {}, // no CreateRegisteredModelRequest inside
+                },
+            },
+            error: "model serving endpoint endpoint1 is not defined",
+        },
+        {
+            resources: config.Resources{
+                QualityMonitors: map[string]*resources.QualityMonitor{
+                    "monitor1": {}, // no CreateMonitor inside
+                },
+            },
+            error: "quality monitor monitor1 is not defined",
+        },
+        {
+            resources: config.Resources{
+                Schemas: map[string]*resources.Schema{
+                    "schema1": {}, // no CreateSchema inside
+                },
+            },
+            error: "schema schema1 is not defined",
+        },
+        {
+            resources: config.Resources{
+                Clusters: map[string]*resources.Cluster{
+                    "cluster1": {}, // no ClusterSpec inside
+                },
+            },
+            error: "cluster cluster1 is not defined",
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.error, func(t *testing.T) {
+            b := &bundle.Bundle{
+                Config: config.Root{
+                    Resources: tt.resources,
+                    Presets: config.Presets{
+                        TriggerPauseStatus: config.Paused,
+                    },
+                },
+            }
+
+            ctx := context.Background()
+            diags := bundle.Apply(ctx, b, mutator.ApplyPresets())
+
+            require.ErrorContains(t, diags.Error(), tt.error)
+        })
+    }
+}
@@ -0,0 +1,87 @@
+package mutator
+
+import (
+    "context"
+
+    "github.com/databricks/cli/bundle"
+    "github.com/databricks/cli/libs/diag"
+    "github.com/databricks/cli/libs/dyn"
+)
+
+type computeIdToClusterId struct{}
+
+func ComputeIdToClusterId() bundle.Mutator {
+    return &computeIdToClusterId{}
+}
+
+func (m *computeIdToClusterId) Name() string {
+    return "ComputeIdToClusterId"
+}
+
+func (m *computeIdToClusterId) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+    var diags diag.Diagnostics
+
+    // The "compute_id" key is set; rewrite it to "cluster_id".
+    err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+        v, d := rewriteComputeIdToClusterId(v, dyn.NewPath(dyn.Key("bundle")))
+        diags = diags.Extend(d)
+
+        // Check if the "compute_id" key is set in any target overrides.
+        return dyn.MapByPattern(v, dyn.NewPattern(dyn.Key("targets"), dyn.AnyKey()), func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+            v, d := rewriteComputeIdToClusterId(v, dyn.Path{})
+            diags = diags.Extend(d)
+            return v, nil
+        })
+    })
+
+    diags = diags.Extend(diag.FromErr(err))
+    return diags
+}
+
+func rewriteComputeIdToClusterId(v dyn.Value, p dyn.Path) (dyn.Value, diag.Diagnostics) {
+    var diags diag.Diagnostics
+    computeIdPath := p.Append(dyn.Key("compute_id"))
+    computeId, err := dyn.GetByPath(v, computeIdPath)
+
+    // If the "compute_id" key is not set, we don't need to do anything.
+    if err != nil {
+        return v, nil
+    }
+
+    if computeId.Kind() == dyn.KindInvalid {
+        return v, nil
+    }
+
+    diags = diags.Append(diag.Diagnostic{
+        Severity:  diag.Warning,
+        Summary:   "compute_id is deprecated, please use cluster_id instead",
+        Locations: computeId.Locations(),
+        Paths:     []dyn.Path{computeIdPath},
+    })
+
+    clusterIdPath := p.Append(dyn.Key("cluster_id"))
+    nv, err := dyn.SetByPath(v, clusterIdPath, computeId)
+    if err != nil {
+        return dyn.InvalidValue, diag.FromErr(err)
+    }
+    // Drop the "compute_id" key.
+    vout, err := dyn.Walk(nv, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+        switch len(p) {
+        case 0:
+            return v, nil
+        case 1:
+            if p[0] == dyn.Key("compute_id") {
+                return v, dyn.ErrDrop
+            }
+            return v, nil
+        case 2:
+            if p[1] == dyn.Key("compute_id") {
+                return v, dyn.ErrDrop
+            }
+        }
+        return v, dyn.ErrSkip
+    })
+
+    diags = diags.Extend(diag.FromErr(err))
+    return vout, diags
+}
@@ -0,0 +1,57 @@
+package mutator_test
+
+import (
+    "context"
+    "testing"
+
+    "github.com/databricks/cli/bundle"
+    "github.com/databricks/cli/bundle/config"
+    "github.com/databricks/cli/bundle/config/mutator"
+    "github.com/databricks/cli/libs/diag"
+    "github.com/stretchr/testify/assert"
+)
+
+func TestComputeIdToClusterId(t *testing.T) {
+    b := &bundle.Bundle{
+        Config: config.Root{
+            Bundle: config.Bundle{
+                ComputeId: "compute-id",
+            },
+        },
+    }
+
+    diags := bundle.Apply(context.Background(), b, mutator.ComputeIdToClusterId())
+    assert.NoError(t, diags.Error())
+    assert.Equal(t, "compute-id", b.Config.Bundle.ClusterId)
+    assert.Empty(t, b.Config.Bundle.ComputeId)
+
+    assert.Len(t, diags, 1)
+    assert.Equal(t, "compute_id is deprecated, please use cluster_id instead", diags[0].Summary)
+    assert.Equal(t, diag.Warning, diags[0].Severity)
+}
+
+func TestComputeIdToClusterIdInTargetOverride(t *testing.T) {
+    b := &bundle.Bundle{
+        Config: config.Root{
+            Targets: map[string]*config.Target{
+                "dev": {
+                    ComputeId: "compute-id-dev",
+                },
+            },
+        },
+    }
+
+    diags := bundle.Apply(context.Background(), b, mutator.ComputeIdToClusterId())
+    assert.NoError(t, diags.Error())
+    assert.Empty(t, b.Config.Targets["dev"].ComputeId)
+
+    diags = diags.Extend(bundle.Apply(context.Background(), b, mutator.SelectTarget("dev")))
+    assert.NoError(t, diags.Error())
+
+    assert.Equal(t, "compute-id-dev", b.Config.Bundle.ClusterId)
+    assert.Empty(t, b.Config.Bundle.ComputeId)
+
+    assert.Len(t, diags, 1)
+    assert.Equal(t, "compute_id is deprecated, please use cluster_id instead", diags[0].Summary)
+    assert.Equal(t, diag.Warning, diags[0].Severity)
+}
@@ -29,6 +29,10 @@ func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundl
         b.Config.Workspace.FilePath = path.Join(root, "files")
     }

+    if b.Config.Workspace.ResourcePath == "" {
+        b.Config.Workspace.ResourcePath = path.Join(root, "resources")
+    }
+
     if b.Config.Workspace.ArtifactPath == "" {
         b.Config.Workspace.ArtifactPath = path.Join(root, "artifacts")
     }
@@ -22,6 +22,7 @@ func TestDefineDefaultWorkspacePaths(t *testing.T) {
     diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths())
     require.NoError(t, diags.Error())
     assert.Equal(t, "/files", b.Config.Workspace.FilePath)
+    assert.Equal(t, "/resources", b.Config.Workspace.ResourcePath)
     assert.Equal(t, "/artifacts", b.Config.Workspace.ArtifactPath)
     assert.Equal(t, "/state", b.Config.Workspace.StatePath)
 }
@@ -32,6 +33,7 @@ func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) {
             Workspace: config.Workspace{
                 RootPath: "/",
                 FilePath: "/foo/bar",
+                ResourcePath: "/foo/bar",
                 ArtifactPath: "/foo/bar",
                 StatePath: "/foo/bar",
             },
@@ -40,6 +42,7 @@ func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) {
     diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths())
     require.NoError(t, diags.Error())
     assert.Equal(t, "/foo/bar", b.Config.Workspace.FilePath)
+    assert.Equal(t, "/foo/bar", b.Config.Workspace.ResourcePath)
     assert.Equal(t, "/foo/bar", b.Config.Workspace.ArtifactPath)
     assert.Equal(t, "/foo/bar", b.Config.Workspace.StatePath)
 }
@@ -10,6 +10,7 @@ import (
     "github.com/databricks/cli/bundle/config"
     "github.com/databricks/cli/bundle/config/resources"
     "github.com/databricks/cli/bundle/internal/bundletest"
+    "github.com/databricks/cli/libs/dyn"
     "github.com/databricks/databricks-sdk-go/service/compute"
     "github.com/databricks/databricks-sdk-go/service/pipelines"
     "github.com/stretchr/testify/require"
@@ -41,7 +42,7 @@ func TestExpandGlobPathsInPipelines(t *testing.T) {
     touchEmptyFile(t, filepath.Join(dir, "skip/test7.py"))

     b := &bundle.Bundle{
-        RootPath: dir,
+        BundleRootPath: dir,
         Config: config.Root{
             Resources: config.Resources{
                 Pipelines: map[string]*resources.Pipeline{
@@ -105,8 +106,8 @@ func TestExpandGlobPathsInPipelines(t *testing.T) {
         },
     }

-    bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
-    bundletest.SetLocation(b, "resources.pipelines.pipeline.libraries[3]", filepath.Join(dir, "relative", "resource.yml"))
+    bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})
+    bundletest.SetLocation(b, "resources.pipelines.pipeline.libraries[3]", []dyn.Location{{File: filepath.Join(dir, "relative", "resource.yml")}})

     m := ExpandPipelineGlobPaths()
     diags := bundle.Apply(context.Background(), b, m)
@@ -33,7 +33,7 @@ func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.
     }

     if strings.HasPrefix(root, "~/") {
-        home := fmt.Sprintf("/Users/%s", currentUser.UserName)
+        home := fmt.Sprintf("/Workspace/Users/%s", currentUser.UserName)
         b.Config.Workspace.RootPath = path.Join(home, root[2:])
     }

@@ -27,7 +27,7 @@ func TestExpandWorkspaceRoot(t *testing.T) {
     }
     diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
     require.NoError(t, diags.Error())
-    assert.Equal(t, "/Users/jane@doe.com/foo", b.Config.Workspace.RootPath)
+    assert.Equal(t, "/Workspace/Users/jane@doe.com/foo", b.Config.Workspace.RootPath)
 }

 func TestExpandWorkspaceRootDoesNothing(t *testing.T) {
@@ -56,7 +56,7 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn
     }

     // Compute relative path of the bundle root from the Git repo root.
-    absBundlePath, err := filepath.Abs(b.RootPath)
+    absBundlePath, err := filepath.Abs(b.BundleRootPath)
     if err != nil {
         return diag.FromErr(err)
     }
@@ -23,6 +23,7 @@ func DefaultMutators() []bundle.Mutator {
         VerifyCliVersion(),

         EnvironmentsToTargets(),
+        ComputeIdToClusterId(),
         InitializeVariables(),
         DefineDefaultTarget(),
         LoadGitDetails(),
@@ -39,22 +39,22 @@ func overrideJobCompute(j *resources.Job, compute string) {
 
 func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	if b.Config.Bundle.Mode != config.Development {
-		if b.Config.Bundle.ComputeID != "" {
+		if b.Config.Bundle.ClusterId != "" {
 			return diag.Errorf("cannot override compute for an target that does not use 'mode: development'")
 		}
 		return nil
 	}
 	if v := env.Get(ctx, "DATABRICKS_CLUSTER_ID"); v != "" {
-		b.Config.Bundle.ComputeID = v
+		b.Config.Bundle.ClusterId = v
 	}
 
-	if b.Config.Bundle.ComputeID == "" {
+	if b.Config.Bundle.ClusterId == "" {
 		return nil
 	}
 
 	r := b.Config.Resources
 	for i := range r.Jobs {
-		overrideJobCompute(r.Jobs[i], b.Config.Bundle.ComputeID)
+		overrideJobCompute(r.Jobs[i], b.Config.Bundle.ClusterId)
 	}
 
 	return nil
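A short standalone sketch of the override precedence shown in the hunk above: in development mode an environment override wins over the configured cluster ID. The helper name and the cluster IDs are made up for illustration; they are not the CLI's API.

```go
package main

import (
	"fmt"
	"os"
)

// pickClusterID mirrors the renamed ClusterId override: when
// DATABRICKS_CLUSTER_ID is set it takes precedence over the value
// configured in the bundle.
func pickClusterID(configured string) string {
	if v := os.Getenv("DATABRICKS_CLUSTER_ID"); v != "" {
		return v
	}
	return configured
}

func main() {
	os.Setenv("DATABRICKS_CLUSTER_ID", "0601-182128-dcbte59m")
	fmt.Println(pickClusterID("1234-567890-abcde123")) // prints the env override
}
```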
@@ -20,7 +20,7 @@ func TestOverrideDevelopment(t *testing.T) {
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Mode:      config.Development,
-				ComputeID: "newClusterID",
+				ClusterId: "newClusterID",
 			},
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
@@ -144,7 +144,7 @@ func TestOverrideProduction(t *testing.T) {
 	b := &bundle.Bundle{
 		Config: config.Root{
 			Bundle: config.Bundle{
-				ComputeID: "newClusterID",
+				ClusterId: "newClusterID",
 			},
 			Resources: config.Resources{
 				Jobs: map[string]*resources.Job{
@@ -0,0 +1,115 @@
+package paths
+
+import (
+	"github.com/databricks/cli/bundle/libraries"
+	"github.com/databricks/cli/libs/dyn"
+)
+
+type jobRewritePattern struct {
+	pattern     dyn.Pattern
+	kind        PathKind
+	skipRewrite func(string) bool
+}
+
+func noSkipRewrite(string) bool {
+	return false
+}
+
+func jobTaskRewritePatterns(base dyn.Pattern) []jobRewritePattern {
+	return []jobRewritePattern{
+		{
+			base.Append(dyn.Key("notebook_task"), dyn.Key("notebook_path")),
+			PathKindNotebook,
+			noSkipRewrite,
+		},
+		{
+			base.Append(dyn.Key("spark_python_task"), dyn.Key("python_file")),
+			PathKindWorkspaceFile,
+			noSkipRewrite,
+		},
+		{
+			base.Append(dyn.Key("dbt_task"), dyn.Key("project_directory")),
+			PathKindDirectory,
+			noSkipRewrite,
+		},
+		{
+			base.Append(dyn.Key("sql_task"), dyn.Key("file"), dyn.Key("path")),
+			PathKindWorkspaceFile,
+			noSkipRewrite,
+		},
+		{
+			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("whl")),
+			PathKindLibrary,
+			noSkipRewrite,
+		},
+		{
+			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("jar")),
+			PathKindLibrary,
+			noSkipRewrite,
+		},
+		{
+			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("requirements")),
+			PathKindWorkspaceFile,
+			noSkipRewrite,
+		},
+	}
+}
+
+func jobRewritePatterns() []jobRewritePattern {
+	// Base pattern to match all tasks in all jobs.
+	base := dyn.NewPattern(
+		dyn.Key("resources"),
+		dyn.Key("jobs"),
+		dyn.AnyKey(),
+		dyn.Key("tasks"),
+		dyn.AnyIndex(),
+	)
+
+	// Compile list of patterns and their respective rewrite functions.
+	jobEnvironmentsPatterns := []jobRewritePattern{
+		{
+			dyn.NewPattern(
+				dyn.Key("resources"),
+				dyn.Key("jobs"),
+				dyn.AnyKey(),
+				dyn.Key("environments"),
+				dyn.AnyIndex(),
+				dyn.Key("spec"),
+				dyn.Key("dependencies"),
+				dyn.AnyIndex(),
+			),
+			PathKindWithPrefix,
+			func(s string) bool {
+				return !libraries.IsLibraryLocal(s)
+			},
+		},
+	}
+
+	taskPatterns := jobTaskRewritePatterns(base)
+	forEachPatterns := jobTaskRewritePatterns(base.Append(dyn.Key("for_each_task"), dyn.Key("task")))
+	allPatterns := append(taskPatterns, jobEnvironmentsPatterns...)
+	allPatterns = append(allPatterns, forEachPatterns...)
+	return allPatterns
+}
+
+// VisitJobPaths visits all paths in job resources and applies a function to each path.
+func VisitJobPaths(value dyn.Value, fn VisitFunc) (dyn.Value, error) {
+	var err error
+	var newValue = value
+
+	for _, rewritePattern := range jobRewritePatterns() {
+		newValue, err = dyn.MapByPattern(newValue, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+			if rewritePattern.skipRewrite(v.MustString()) {
+				return v, nil
+			}
+
+			return fn(p, rewritePattern.kind, v)
+		})
+
+		if err != nil {
+			return dyn.InvalidValue, err
+		}
+	}
+
+	return newValue, nil
+}
@ -0,0 +1,168 @@
|
||||||
|
package paths
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/databricks/cli/bundle/config"
|
||||||
|
"github.com/databricks/cli/bundle/config/resources"
|
||||||
|
"github.com/databricks/cli/libs/dyn"
|
||||||
|
assert "github.com/databricks/cli/libs/dyn/dynassert"
|
||||||
|
"github.com/databricks/databricks-sdk-go/service/compute"
|
||||||
|
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestVisitJobPaths(t *testing.T) {
|
||||||
|
task0 := jobs.Task{
|
||||||
|
NotebookTask: &jobs.NotebookTask{
|
||||||
|
NotebookPath: "abc",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
task1 := jobs.Task{
|
||||||
|
SparkPythonTask: &jobs.SparkPythonTask{
|
||||||
|
PythonFile: "abc",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
task2 := jobs.Task{
|
||||||
|
DbtTask: &jobs.DbtTask{
|
||||||
|
ProjectDirectory: "abc",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
task3 := jobs.Task{
|
||||||
|
SqlTask: &jobs.SqlTask{
|
||||||
|
File: &jobs.SqlTaskFile{
|
||||||
|
Path: "abc",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
task4 := jobs.Task{
|
||||||
|
Libraries: []compute.Library{
|
||||||
|
{Whl: "dist/foo.whl"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
task5 := jobs.Task{
|
||||||
|
Libraries: []compute.Library{
|
||||||
|
{Jar: "dist/foo.jar"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
task6 := jobs.Task{
|
||||||
|
Libraries: []compute.Library{
|
||||||
|
{Requirements: "requirements.txt"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
job0 := &resources.Job{
|
||||||
|
JobSettings: &jobs.JobSettings{
|
||||||
|
Tasks: []jobs.Task{
|
||||||
|
task0,
|
||||||
|
task1,
|
||||||
|
task2,
|
||||||
|
task3,
|
||||||
|
task4,
|
||||||
|
task5,
|
||||||
|
task6,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
root := config.Root{
|
||||||
|
Resources: config.Resources{
|
||||||
|
Jobs: map[string]*resources.Job{
|
||||||
|
"job0": job0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
actual := visitJobPaths(t, root)
|
||||||
|
expected := []dyn.Path{
|
||||||
|
dyn.MustPathFromString("resources.jobs.job0.tasks[0].notebook_task.notebook_path"),
|
||||||
|
dyn.MustPathFromString("resources.jobs.job0.tasks[1].spark_python_task.python_file"),
|
||||||
|
dyn.MustPathFromString("resources.jobs.job0.tasks[2].dbt_task.project_directory"),
|
||||||
|
dyn.MustPathFromString("resources.jobs.job0.tasks[3].sql_task.file.path"),
|
||||||
|
dyn.MustPathFromString("resources.jobs.job0.tasks[4].libraries[0].whl"),
|
||||||
|
dyn.MustPathFromString("resources.jobs.job0.tasks[5].libraries[0].jar"),
|
||||||
|
dyn.MustPathFromString("resources.jobs.job0.tasks[6].libraries[0].requirements"),
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.ElementsMatch(t, expected, actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestVisitJobPaths_environments(t *testing.T) {
|
||||||
|
environment0 := jobs.JobEnvironment{
|
||||||
|
Spec: &compute.Environment{
|
||||||
|
Dependencies: []string{
|
||||||
|
"dist_0/*.whl",
|
||||||
|
"dist_1/*.whl",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
job0 := &resources.Job{
|
||||||
|
JobSettings: &jobs.JobSettings{
|
||||||
|
Environments: []jobs.JobEnvironment{
|
||||||
|
environment0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
root := config.Root{
|
||||||
|
Resources: config.Resources{
|
||||||
|
Jobs: map[string]*resources.Job{
|
||||||
|
"job0": job0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
actual := visitJobPaths(t, root)
|
||||||
|
expected := []dyn.Path{
|
||||||
|
dyn.MustPathFromString("resources.jobs.job0.environments[0].spec.dependencies[0]"),
|
||||||
|
dyn.MustPathFromString("resources.jobs.job0.environments[0].spec.dependencies[1]"),
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.ElementsMatch(t, expected, actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestVisitJobPaths_foreach(t *testing.T) {
|
||||||
|
task0 := jobs.Task{
|
||||||
|
ForEachTask: &jobs.ForEachTask{
|
||||||
|
Task: jobs.Task{
|
||||||
|
NotebookTask: &jobs.NotebookTask{
|
||||||
|
NotebookPath: "abc",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
job0 := &resources.Job{
|
||||||
|
JobSettings: &jobs.JobSettings{
|
||||||
|
Tasks: []jobs.Task{
|
||||||
|
task0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
root := config.Root{
|
||||||
|
Resources: config.Resources{
|
||||||
|
Jobs: map[string]*resources.Job{
|
||||||
|
"job0": job0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
actual := visitJobPaths(t, root)
|
||||||
|
expected := []dyn.Path{
|
||||||
|
dyn.MustPathFromString("resources.jobs.job0.tasks[0].for_each_task.task.notebook_task.notebook_path"),
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.ElementsMatch(t, expected, actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func visitJobPaths(t *testing.T, root config.Root) []dyn.Path {
|
||||||
|
var actual []dyn.Path
|
||||||
|
err := root.Mutate(func(value dyn.Value) (dyn.Value, error) {
|
||||||
|
return VisitJobPaths(value, func(p dyn.Path, kind PathKind, v dyn.Value) (dyn.Value, error) {
|
||||||
|
actual = append(actual, p)
|
||||||
|
return v, nil
|
||||||
|
})
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
return actual
|
||||||
|
}
|
|
@@ -0,0 +1,26 @@
+package paths
+
+import "github.com/databricks/cli/libs/dyn"
+
+type PathKind int
+
+const (
+	// PathKindLibrary is a path to a library file
+	PathKindLibrary = iota
+
+	// PathKindNotebook is a path to a notebook file
+	PathKindNotebook
+
+	// PathKindWorkspaceFile is a path to a regular workspace file,
+	// notebooks are not allowed because they are uploaded a special
+	// kind of workspace object.
+	PathKindWorkspaceFile
+
+	// PathKindWithPrefix is a path that starts with './'
+	PathKindWithPrefix
+
+	// PathKindDirectory is a path to directory
+	PathKindDirectory
+)
+
+type VisitFunc func(path dyn.Path, kind PathKind, value dyn.Value) (dyn.Value, error)
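Purely as an illustration of how these kinds might be surfaced in logs, a standalone sketch that redefines the same constants and adds a String method; the method is not part of this change.

```go
package main

import "fmt"

// PathKind here mirrors the constants introduced above so the example is
// self-contained; only the String method is new and purely illustrative.
type PathKind int

const (
	PathKindLibrary PathKind = iota
	PathKindNotebook
	PathKindWorkspaceFile
	PathKindWithPrefix
	PathKindDirectory
)

func (k PathKind) String() string {
	switch k {
	case PathKindLibrary:
		return "library"
	case PathKindNotebook:
		return "notebook"
	case PathKindWorkspaceFile:
		return "workspace file"
	case PathKindWithPrefix:
		return "path with './' prefix"
	case PathKindDirectory:
		return "directory"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(PathKindNotebook) // notebook
}
```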
@@ -33,7 +33,7 @@ func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) diag.
 	}
 
 	b.Config.Workspace.CurrentUser = &config.User{
-		ShortName: auth.GetShortUserName(me.UserName),
+		ShortName: auth.GetShortUserName(me),
 		User:      me,
 	}
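For intuition only, a minimal sketch of what a short-name helper typically does, assuming the short name is derived from the local part of the user name; this is not the CLI's actual implementation, which now receives the whole user object.

```go
package main

import (
	"fmt"
	"strings"
)

// shortName is a simplified stand-in for auth.GetShortUserName: it keeps the
// part of the user name before the '@' and normalizes separators. The real
// helper may behave differently, especially for service principals.
func shortName(userName string) string {
	local, _, _ := strings.Cut(userName, "@")
	return strings.NewReplacer(".", "_", "-", "_", "+", "_").Replace(local)
}

func main() {
	fmt.Println(shortName("jane.doe@company.com")) // jane_doe
}
```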
@@ -0,0 +1,67 @@
+package mutator
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+)
+
+type prependWorkspacePrefix struct{}
+
+// PrependWorkspacePrefix prepends the workspace root path to all paths in the bundle.
+func PrependWorkspacePrefix() bundle.Mutator {
+	return &prependWorkspacePrefix{}
+}
+
+func (m *prependWorkspacePrefix) Name() string {
+	return "PrependWorkspacePrefix"
+}
+
+var skipPrefixes = []string{
+	"/Workspace/",
+	"/Volumes/",
+}
+
+func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	patterns := []dyn.Pattern{
+		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("root_path")),
+		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("file_path")),
+		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("artifact_path")),
+		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("state_path")),
+	}
+
+	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+		var err error
+		for _, pattern := range patterns {
+			v, err = dyn.MapByPattern(v, pattern, func(p dyn.Path, pv dyn.Value) (dyn.Value, error) {
+				path, ok := pv.AsString()
+				if !ok {
+					return dyn.InvalidValue, fmt.Errorf("expected string, got %s", v.Kind())
+				}
+
+				for _, prefix := range skipPrefixes {
+					if strings.HasPrefix(path, prefix) {
+						return pv, nil
+					}
+				}
+
+				return dyn.NewValue(fmt.Sprintf("/Workspace%s", path), v.Locations()), nil
+			})
+
+			if err != nil {
+				return dyn.InvalidValue, err
+			}
+		}
+		return v, nil
+	})
+
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	return nil
+}
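A minimal standalone sketch of the skip-prefix rule the mutator above applies; the paths are illustrative, and the helper is not the mutator itself.

```go
package main

import (
	"fmt"
	"strings"
)

// prependWorkspace restates the rule from the mutator above: paths already
// under /Workspace/ or /Volumes/ are left alone, everything else is prefixed.
func prependWorkspace(path string) string {
	for _, prefix := range []string{"/Workspace/", "/Volumes/"} {
		if strings.HasPrefix(path, prefix) {
			return path
		}
	}
	return "/Workspace" + path
}

func main() {
	fmt.Println(prependWorkspace("/Users/jane@doe.com/.bundle/test/dev"))
	fmt.Println(prependWorkspace("/Volumes/catalog/schema/volume")) // unchanged
}
```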
@ -0,0 +1,79 @@
|
||||||
|
package mutator
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/databricks/cli/bundle"
|
||||||
|
"github.com/databricks/cli/bundle/config"
|
||||||
|
"github.com/databricks/databricks-sdk-go/service/iam"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestPrependWorkspacePrefix(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
path string
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
path: "/Users/test",
|
||||||
|
expected: "/Workspace/Users/test",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
path: "/Shared/test",
|
||||||
|
expected: "/Workspace/Shared/test",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
path: "/Workspace/Users/test",
|
||||||
|
expected: "/Workspace/Users/test",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
path: "/Volumes/Users/test",
|
||||||
|
expected: "/Volumes/Users/test",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
b := &bundle.Bundle{
|
||||||
|
Config: config.Root{
|
||||||
|
Workspace: config.Workspace{
|
||||||
|
RootPath: tc.path,
|
||||||
|
ArtifactPath: tc.path,
|
||||||
|
FilePath: tc.path,
|
||||||
|
StatePath: tc.path,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
diags := bundle.Apply(context.Background(), b, PrependWorkspacePrefix())
|
||||||
|
require.Empty(t, diags)
|
||||||
|
require.Equal(t, tc.expected, b.Config.Workspace.RootPath)
|
||||||
|
require.Equal(t, tc.expected, b.Config.Workspace.ArtifactPath)
|
||||||
|
require.Equal(t, tc.expected, b.Config.Workspace.FilePath)
|
||||||
|
require.Equal(t, tc.expected, b.Config.Workspace.StatePath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPrependWorkspaceForDefaultConfig(t *testing.T) {
|
||||||
|
b := &bundle.Bundle{
|
||||||
|
Config: config.Root{
|
||||||
|
Bundle: config.Bundle{
|
||||||
|
Name: "test",
|
||||||
|
Target: "dev",
|
||||||
|
},
|
||||||
|
Workspace: config.Workspace{
|
||||||
|
CurrentUser: &config.User{
|
||||||
|
User: &iam.User{
|
||||||
|
UserName: "jane@doe.com",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
diags := bundle.Apply(context.Background(), b, bundle.Seq(DefineDefaultWorkspaceRoot(), ExpandWorkspaceRoot(), DefineDefaultWorkspacePaths(), PrependWorkspacePrefix()))
|
||||||
|
require.Empty(t, diags)
|
||||||
|
require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev", b.Config.Workspace.RootPath)
|
||||||
|
require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/artifacts", b.Config.Workspace.ArtifactPath)
|
||||||
|
require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/files", b.Config.Workspace.FilePath)
|
||||||
|
require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/state", b.Config.Workspace.StatePath)
|
||||||
|
}
|
|
@@ -118,15 +118,18 @@ func findNonUserPath(b *bundle.Bundle) string {
 	if b.Config.Workspace.RootPath != "" && !containsName(b.Config.Workspace.RootPath) {
 		return "root_path"
 	}
-	if b.Config.Workspace.StatePath != "" && !containsName(b.Config.Workspace.StatePath) {
-		return "state_path"
-	}
 	if b.Config.Workspace.FilePath != "" && !containsName(b.Config.Workspace.FilePath) {
 		return "file_path"
 	}
+	if b.Config.Workspace.ResourcePath != "" && !containsName(b.Config.Workspace.ResourcePath) {
+		return "resource_path"
+	}
 	if b.Config.Workspace.ArtifactPath != "" && !containsName(b.Config.Workspace.ArtifactPath) {
 		return "artifact_path"
 	}
+	if b.Config.Workspace.StatePath != "" && !containsName(b.Config.Workspace.StatePath) {
+		return "state_path"
+	}
 	return ""
 }
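A standalone sketch of the check above, assuming containsName is a plain substring match on the user name (an assumption; the real helper may normalize differently). The field names and example paths are illustrative.

```go
package main

import (
	"fmt"
	"strings"
)

// firstNonUserPath returns the name of the first configured path that does
// not mention the user name, in the order used by the new hunk:
// root, file, resource, artifact, state.
func firstNonUserPath(userName string, paths map[string]string) string {
	for _, key := range []string{"root_path", "file_path", "resource_path", "artifact_path", "state_path"} {
		if p := paths[key]; p != "" && !strings.Contains(p, userName) {
			return key
		}
	}
	return ""
}

func main() {
	paths := map[string]string{
		"root_path":  "/Workspace/Users/jane@doe.com/.bundle/test/dev",
		"state_path": "/Workspace/Shared/.bundle/test/dev/state",
	}
	fmt.Println(firstNonUserPath("jane@doe.com", paths)) // state_path
}
```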
@@ -13,6 +13,7 @@ import (
 	"github.com/databricks/cli/libs/tags"
 	sdkconfig "github.com/databricks/databricks-sdk-go/config"
 	"github.com/databricks/databricks-sdk-go/service/catalog"
+	"github.com/databricks/databricks-sdk-go/service/compute"
 	"github.com/databricks/databricks-sdk-go/service/iam"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/databricks/databricks-sdk-go/service/ml"
@@ -119,6 +120,9 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
 				Schemas: map[string]*resources.Schema{
 					"schema1": {CreateSchema: &catalog.CreateSchema{Name: "schema1"}},
 				},
+				Clusters: map[string]*resources.Cluster{
+					"cluster1": {ClusterSpec: &compute.ClusterSpec{ClusterName: "cluster1", SparkVersion: "13.2.x", NumWorkers: 1}},
+				},
 			},
 		},
 		// Use AWS implementation for testing.
@@ -177,6 +181,9 @@ func TestProcessTargetModeDevelopment(t *testing.T) {
 	// Schema 1
 	assert.Equal(t, "dev_lennart_schema1", b.Config.Resources.Schemas["schema1"].Name)
 
+	// Clusters
+	assert.Equal(t, "[dev lennart] cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)
 }
 
 func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) {
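A tiny sketch of the naming convention the assertion above checks ("[dev lennart] cluster1"); the CLI's actual prefixing lives in the target-mode/preset mutators and may include extra normalization.

```go
package main

import "fmt"

// devPrefix illustrates how development mode decorates resource display names.
func devPrefix(shortName, resourceName string) string {
	return fmt.Sprintf("[dev %s] %s", shortName, resourceName)
}

func main() {
	fmt.Println(devPrefix("lennart", "cluster1")) // [dev lennart] cluster1
}
```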
@@ -281,6 +288,7 @@ func TestProcessTargetModeDefault(t *testing.T) {
 	assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
 	assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
 	assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
+	assert.Equal(t, "cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)
 }
 
 func TestProcessTargetModeProduction(t *testing.T) {
|
||||||
b.Config.Resources.Experiments["experiment2"].Permissions = permissions
|
b.Config.Resources.Experiments["experiment2"].Permissions = permissions
|
||||||
b.Config.Resources.Models["model1"].Permissions = permissions
|
b.Config.Resources.Models["model1"].Permissions = permissions
|
||||||
b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Permissions = permissions
|
b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Permissions = permissions
|
||||||
|
b.Config.Resources.Clusters["cluster1"].Permissions = permissions
|
||||||
|
|
||||||
diags = validateProductionMode(context.Background(), b, false)
|
diags = validateProductionMode(context.Background(), b, false)
|
||||||
require.NoError(t, diags.Error())
|
require.NoError(t, diags.Error())
|
||||||
|
@ -322,6 +331,7 @@ func TestProcessTargetModeProduction(t *testing.T) {
|
||||||
assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
|
assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
|
||||||
assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
|
assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name)
|
||||||
assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
|
assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
|
||||||
|
assert.Equal(t, "cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) {
|
func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) {
|
||||||
|
|
|
@@ -108,7 +108,7 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
 			return dyn.InvalidValue, fmt.Errorf("failed to create cache dir: %w", err)
 		}
 
-		rightRoot, diags := m.runPythonMutator(ctx, cacheDir, b.RootPath, pythonPath, leftRoot)
+		rightRoot, diags := m.runPythonMutator(ctx, cacheDir, b.BundleRootPath, pythonPath, leftRoot)
 		mutateDiags = diags
 		if diags.HasError() {
 			return dyn.InvalidValue, mutateDiagsHasError
@@ -45,15 +45,15 @@ func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc {
 func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
 		return dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (nv dyn.Value, err error) {
-			v, err = dyn.Map(v, "paths", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
+			v, err = dyn.Map(v, "paths", dyn.Foreach(m.makeRelativeTo(b.BundleRootPath)))
 			if err != nil {
 				return dyn.InvalidValue, err
 			}
-			v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
+			v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.BundleRootPath)))
 			if err != nil {
 				return dyn.InvalidValue, err
 			}
-			v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
+			v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.BundleRootPath)))
 			if err != nil {
 				return dyn.InvalidValue, err
 			}
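A sketch of the idea behind makeRelativeTo: a sync path written relative to the file that declares it gets re-expressed relative to the bundle root. The real MapFunc operates on dyn.Values and their locations; the helper and paths below are illustrative only.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// relativeToRoot re-expresses a path declared in declaringDir relative to root.
func relativeToRoot(root, declaringDir, p string) (string, error) {
	return filepath.Rel(root, filepath.Join(declaringDir, p))
}

func main() {
	rel, _ := relativeToRoot("/tmp/dir", "/tmp/dir/a", "./file.yml")
	fmt.Println(rel) // a/file.yml
}
```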
@ -9,12 +9,13 @@ import (
|
||||||
"github.com/databricks/cli/bundle/config"
|
"github.com/databricks/cli/bundle/config"
|
||||||
"github.com/databricks/cli/bundle/config/mutator"
|
"github.com/databricks/cli/bundle/config/mutator"
|
||||||
"github.com/databricks/cli/bundle/internal/bundletest"
|
"github.com/databricks/cli/bundle/internal/bundletest"
|
||||||
|
"github.com/databricks/cli/libs/dyn"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestRewriteSyncPathsRelative(t *testing.T) {
|
func TestRewriteSyncPathsRelative(t *testing.T) {
|
||||||
b := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
RootPath: ".",
|
BundleRootPath: ".",
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Sync: config.Sync{
|
Sync: config.Sync{
|
||||||
Paths: []string{
|
Paths: []string{
|
||||||
|
@ -33,12 +34,12 @@ func TestRewriteSyncPathsRelative(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
bundletest.SetLocation(b, "sync.paths[0]", "./databricks.yml")
|
bundletest.SetLocation(b, "sync.paths[0]", []dyn.Location{{File: "./databricks.yml"}})
|
||||||
bundletest.SetLocation(b, "sync.paths[1]", "./databricks.yml")
|
bundletest.SetLocation(b, "sync.paths[1]", []dyn.Location{{File: "./databricks.yml"}})
|
||||||
bundletest.SetLocation(b, "sync.include[0]", "./file.yml")
|
bundletest.SetLocation(b, "sync.include[0]", []dyn.Location{{File: "./file.yml"}})
|
||||||
bundletest.SetLocation(b, "sync.include[1]", "./a/file.yml")
|
bundletest.SetLocation(b, "sync.include[1]", []dyn.Location{{File: "./a/file.yml"}})
|
||||||
bundletest.SetLocation(b, "sync.exclude[0]", "./a/b/file.yml")
|
bundletest.SetLocation(b, "sync.exclude[0]", []dyn.Location{{File: "./a/b/file.yml"}})
|
||||||
bundletest.SetLocation(b, "sync.exclude[1]", "./a/b/c/file.yml")
|
bundletest.SetLocation(b, "sync.exclude[1]", []dyn.Location{{File: "./a/b/c/file.yml"}})
|
||||||
|
|
||||||
diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
|
diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
|
||||||
assert.NoError(t, diags.Error())
|
assert.NoError(t, diags.Error())
|
||||||
|
@ -53,7 +54,7 @@ func TestRewriteSyncPathsRelative(t *testing.T) {
|
||||||
|
|
||||||
func TestRewriteSyncPathsAbsolute(t *testing.T) {
|
func TestRewriteSyncPathsAbsolute(t *testing.T) {
|
||||||
b := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
RootPath: "/tmp/dir",
|
BundleRootPath: "/tmp/dir",
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Sync: config.Sync{
|
Sync: config.Sync{
|
||||||
Paths: []string{
|
Paths: []string{
|
||||||
|
@ -72,12 +73,12 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
bundletest.SetLocation(b, "sync.paths[0]", "/tmp/dir/databricks.yml")
|
bundletest.SetLocation(b, "sync.paths[0]", []dyn.Location{{File: "/tmp/dir/databricks.yml"}})
|
||||||
bundletest.SetLocation(b, "sync.paths[1]", "/tmp/dir/databricks.yml")
|
bundletest.SetLocation(b, "sync.paths[1]", []dyn.Location{{File: "/tmp/dir/databricks.yml"}})
|
||||||
bundletest.SetLocation(b, "sync.include[0]", "/tmp/dir/file.yml")
|
bundletest.SetLocation(b, "sync.include[0]", []dyn.Location{{File: "/tmp/dir/file.yml"}})
|
||||||
bundletest.SetLocation(b, "sync.include[1]", "/tmp/dir/a/file.yml")
|
bundletest.SetLocation(b, "sync.include[1]", []dyn.Location{{File: "/tmp/dir/a/file.yml"}})
|
||||||
bundletest.SetLocation(b, "sync.exclude[0]", "/tmp/dir/a/b/file.yml")
|
bundletest.SetLocation(b, "sync.exclude[0]", []dyn.Location{{File: "/tmp/dir/a/b/file.yml"}})
|
||||||
bundletest.SetLocation(b, "sync.exclude[1]", "/tmp/dir/a/b/c/file.yml")
|
bundletest.SetLocation(b, "sync.exclude[1]", []dyn.Location{{File: "/tmp/dir/a/b/c/file.yml"}})
|
||||||
|
|
||||||
diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
|
diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
|
||||||
assert.NoError(t, diags.Error())
|
assert.NoError(t, diags.Error())
|
||||||
|
@ -93,7 +94,7 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
|
||||||
func TestRewriteSyncPathsErrorPaths(t *testing.T) {
|
func TestRewriteSyncPathsErrorPaths(t *testing.T) {
|
||||||
t.Run("no sync block", func(t *testing.T) {
|
t.Run("no sync block", func(t *testing.T) {
|
||||||
b := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
RootPath: ".",
|
BundleRootPath: ".",
|
||||||
}
|
}
|
||||||
|
|
||||||
diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
|
diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
|
||||||
|
@ -102,7 +103,7 @@ func TestRewriteSyncPathsErrorPaths(t *testing.T) {
|
||||||
|
|
||||||
t.Run("empty include/exclude blocks", func(t *testing.T) {
|
t.Run("empty include/exclude blocks", func(t *testing.T) {
|
||||||
b := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
RootPath: ".",
|
BundleRootPath: ".",
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Sync: config.Sync{
|
Sync: config.Sync{
|
||||||
Include: []string{},
|
Include: []string{},
|
||||||
|
|
|
@@ -0,0 +1,72 @@
+package mutator
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+)
+
+type rewriteWorkspacePrefix struct{}
+
+// RewriteWorkspacePrefix finds any strings in bundle configration that have
+// workspace prefix plus workspace path variable used and removes workspace prefix from it.
+func RewriteWorkspacePrefix() bundle.Mutator {
+	return &rewriteWorkspacePrefix{}
+}
+
+func (m *rewriteWorkspacePrefix) Name() string {
+	return "RewriteWorkspacePrefix"
+}
+
+func (m *rewriteWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	diags := diag.Diagnostics{}
+	paths := map[string]string{
+		"/Workspace/${workspace.root_path}":     "${workspace.root_path}",
+		"/Workspace${workspace.root_path}":      "${workspace.root_path}",
+		"/Workspace/${workspace.file_path}":     "${workspace.file_path}",
+		"/Workspace${workspace.file_path}":      "${workspace.file_path}",
+		"/Workspace/${workspace.artifact_path}": "${workspace.artifact_path}",
+		"/Workspace${workspace.artifact_path}":  "${workspace.artifact_path}",
+		"/Workspace/${workspace.state_path}":    "${workspace.state_path}",
+		"/Workspace${workspace.state_path}":     "${workspace.state_path}",
+	}
+
+	err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) {
+		// Walk through the bundle configuration, check all the string leafs and
+		// see if any of the prefixes are used in the remote path.
+		return dyn.Walk(root, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+			vv, ok := v.AsString()
+			if !ok {
+				return v, nil
+			}
+
+			for path, replacePath := range paths {
+				if strings.Contains(vv, path) {
+					newPath := strings.Replace(vv, path, replacePath, 1)
+					diags = append(diags, diag.Diagnostic{
+						Severity:  diag.Warning,
+						Summary:   fmt.Sprintf("substring %q found in %q. Please update this to %q.", path, vv, newPath),
+						Detail:    "For more information, please refer to: https://docs.databricks.com/en/release-notes/dev-tools/bundles.html#workspace-paths",
+						Locations: v.Locations(),
+						Paths:     []dyn.Path{p},
+					})
+
+					// Remove the workspace prefix from the string.
+					return dyn.NewValue(newPath, v.Locations()), nil
+				}
+			}
+
+			return v, nil
+		})
+	})
+
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	return diags
+}
@ -0,0 +1,85 @@
|
||||||
|
package mutator
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/databricks/cli/bundle"
|
||||||
|
"github.com/databricks/cli/bundle/config"
|
||||||
|
"github.com/databricks/cli/bundle/config/resources"
|
||||||
|
"github.com/databricks/cli/libs/diag"
|
||||||
|
"github.com/databricks/databricks-sdk-go/service/compute"
|
||||||
|
"github.com/databricks/databricks-sdk-go/service/jobs"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNoWorkspacePrefixUsed(t *testing.T) {
|
||||||
|
b := &bundle.Bundle{
|
||||||
|
Config: config.Root{
|
||||||
|
Workspace: config.Workspace{
|
||||||
|
RootPath: "/Workspace/Users/test",
|
||||||
|
ArtifactPath: "/Workspace/Users/test/artifacts",
|
||||||
|
FilePath: "/Workspace/Users/test/files",
|
||||||
|
StatePath: "/Workspace/Users/test/state",
|
||||||
|
},
|
||||||
|
|
||||||
|
Resources: config.Resources{
|
||||||
|
Jobs: map[string]*resources.Job{
|
||||||
|
"test_job": {
|
||||||
|
JobSettings: &jobs.JobSettings{
|
||||||
|
Tasks: []jobs.Task{
|
||||||
|
{
|
||||||
|
SparkPythonTask: &jobs.SparkPythonTask{
|
||||||
|
PythonFile: "/Workspace/${workspace.root_path}/file1.py",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
NotebookTask: &jobs.NotebookTask{
|
||||||
|
NotebookPath: "/Workspace${workspace.file_path}/notebook1",
|
||||||
|
},
|
||||||
|
Libraries: []compute.Library{
|
||||||
|
{
|
||||||
|
Jar: "/Workspace/${workspace.artifact_path}/jar1.jar",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
NotebookTask: &jobs.NotebookTask{
|
||||||
|
NotebookPath: "${workspace.file_path}/notebook2",
|
||||||
|
},
|
||||||
|
Libraries: []compute.Library{
|
||||||
|
{
|
||||||
|
Jar: "${workspace.artifact_path}/jar2.jar",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
diags := bundle.Apply(context.Background(), b, RewriteWorkspacePrefix())
|
||||||
|
require.Len(t, diags, 3)
|
||||||
|
|
||||||
|
expectedErrors := map[string]bool{
|
||||||
|
`substring "/Workspace/${workspace.root_path}" found in "/Workspace/${workspace.root_path}/file1.py". Please update this to "${workspace.root_path}/file1.py".`: true,
|
||||||
|
`substring "/Workspace${workspace.file_path}" found in "/Workspace${workspace.file_path}/notebook1". Please update this to "${workspace.file_path}/notebook1".`: true,
|
||||||
|
`substring "/Workspace/${workspace.artifact_path}" found in "/Workspace/${workspace.artifact_path}/jar1.jar". Please update this to "${workspace.artifact_path}/jar1.jar".`: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, d := range diags {
|
||||||
|
require.Equal(t, d.Severity, diag.Warning)
|
||||||
|
require.Contains(t, expectedErrors, d.Summary)
|
||||||
|
delete(expectedErrors, d.Summary)
|
||||||
|
}
|
||||||
|
|
||||||
|
require.Equal(t, "${workspace.root_path}/file1.py", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[0].SparkPythonTask.PythonFile)
|
||||||
|
require.Equal(t, "${workspace.file_path}/notebook1", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[1].NotebookTask.NotebookPath)
|
||||||
|
require.Equal(t, "${workspace.artifact_path}/jar1.jar", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[1].Libraries[0].Jar)
|
||||||
|
require.Equal(t, "${workspace.file_path}/notebook2", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[2].NotebookTask.NotebookPath)
|
||||||
|
require.Equal(t, "${workspace.artifact_path}/jar2.jar", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[2].Libraries[0].Jar)
|
||||||
|
|
||||||
|
}
|
|
@@ -32,6 +32,7 @@ func allResourceTypes(t *testing.T) []string {
 	// the dyn library gives us the correct list of all resources supported. Please
 	// also update this check when adding a new resource
 	require.Equal(t, []string{
+		"clusters",
 		"experiments",
 		"jobs",
 		"model_serving_endpoints",
@@ -133,6 +134,7 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) {
 	// some point in the future. These resources are (implicitly) on the deny list, since
 	// they are not on the allow list below.
 	allowList := []string{
+		"clusters",
 		"jobs",
 		"models",
 		"registered_models",
@@ -15,7 +15,7 @@ import (
 
 func TestSyncDefaultPath_DefaultIfUnset(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: "/tmp/some/dir",
+		BundleRootPath: "/tmp/some/dir",
 		Config:         config.Root{},
 	}
@@ -51,7 +51,7 @@ func TestSyncDefaultPath_SkipIfSet(t *testing.T) {
 	for _, tcase := range tcases {
 		t.Run(tcase.name, func(t *testing.T) {
 			b := &bundle.Bundle{
-				RootPath: "/tmp/some/dir",
+				BundleRootPath: "/tmp/some/dir",
 				Config:         config.Root{},
 			}
@@ -57,7 +57,7 @@ func (m *syncInferRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
 	var diags diag.Diagnostics
 
 	// Use the bundle root path as the starting point for inferring the sync root path.
-	bundleRootPath := filepath.Clean(b.RootPath)
+	bundleRootPath := filepath.Clean(b.BundleRootPath)
 
 	// Infer the sync root path by looking at each one of the sync paths.
 	// Every sync path must be a descendant of the final sync root path.
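A simplified sketch of the inference described in the comments above: start from the bundle root and walk up until every sync path is contained in the candidate root. It is not the CLI's actual algorithm; paths are made up for illustration.

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// inferSyncRoot returns the closest ancestor of bundleRoot that contains
// every (possibly relative) sync path.
func inferSyncRoot(bundleRoot string, syncPaths []string) string {
	sep := string(filepath.Separator)
	root := filepath.Clean(bundleRoot)
	for _, p := range syncPaths {
		abs := filepath.Clean(filepath.Join(bundleRoot, p))
		for root != sep && !strings.HasPrefix(abs+sep, root+sep) {
			root = filepath.Dir(root)
		}
	}
	return root
}

func main() {
	fmt.Println(inferSyncRoot("/tmp/some/dir", []string{".", "../common"}))
	// /tmp/some
}
```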
@ -9,13 +9,14 @@ import (
|
||||||
"github.com/databricks/cli/bundle/config"
|
"github.com/databricks/cli/bundle/config"
|
||||||
"github.com/databricks/cli/bundle/config/mutator"
|
"github.com/databricks/cli/bundle/config/mutator"
|
||||||
"github.com/databricks/cli/bundle/internal/bundletest"
|
"github.com/databricks/cli/bundle/internal/bundletest"
|
||||||
|
"github.com/databricks/cli/libs/dyn"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestSyncInferRoot_NominalAbsolute(t *testing.T) {
|
func TestSyncInferRoot_NominalAbsolute(t *testing.T) {
|
||||||
b := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
RootPath: "/tmp/some/dir",
|
BundleRootPath: "/tmp/some/dir",
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Sync: config.Sync{
|
Sync: config.Sync{
|
||||||
Paths: []string{
|
Paths: []string{
|
||||||
|
@ -46,7 +47,7 @@ func TestSyncInferRoot_NominalAbsolute(t *testing.T) {
|
||||||
|
|
||||||
func TestSyncInferRoot_NominalRelative(t *testing.T) {
|
func TestSyncInferRoot_NominalRelative(t *testing.T) {
|
||||||
b := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
RootPath: "./some/dir",
|
BundleRootPath: "./some/dir",
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Sync: config.Sync{
|
Sync: config.Sync{
|
||||||
Paths: []string{
|
Paths: []string{
|
||||||
|
@ -77,7 +78,7 @@ func TestSyncInferRoot_NominalRelative(t *testing.T) {
|
||||||
|
|
||||||
func TestSyncInferRoot_ParentDirectory(t *testing.T) {
|
func TestSyncInferRoot_ParentDirectory(t *testing.T) {
|
||||||
b := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
RootPath: "/tmp/some/dir",
|
BundleRootPath: "/tmp/some/dir",
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Sync: config.Sync{
|
Sync: config.Sync{
|
||||||
Paths: []string{
|
Paths: []string{
|
||||||
|
@ -108,7 +109,7 @@ func TestSyncInferRoot_ParentDirectory(t *testing.T) {
|
||||||
|
|
||||||
func TestSyncInferRoot_ManyParentDirectories(t *testing.T) {
|
func TestSyncInferRoot_ManyParentDirectories(t *testing.T) {
|
||||||
b := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
RootPath: "/tmp/some/dir/that/is/very/deeply/nested",
|
BundleRootPath: "/tmp/some/dir/that/is/very/deeply/nested",
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Sync: config.Sync{
|
Sync: config.Sync{
|
||||||
Paths: []string{
|
Paths: []string{
|
||||||
|
@ -145,7 +146,7 @@ func TestSyncInferRoot_ManyParentDirectories(t *testing.T) {
|
||||||
|
|
||||||
func TestSyncInferRoot_MultiplePaths(t *testing.T) {
|
func TestSyncInferRoot_MultiplePaths(t *testing.T) {
|
||||||
b := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
RootPath: "/tmp/some/bundle/root",
|
BundleRootPath: "/tmp/some/bundle/root",
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Sync: config.Sync{
|
Sync: config.Sync{
|
||||||
Paths: []string{
|
Paths: []string{
|
||||||
|
@ -172,7 +173,7 @@ func TestSyncInferRoot_MultiplePaths(t *testing.T) {
|
||||||
|
|
||||||
func TestSyncInferRoot_Error(t *testing.T) {
|
func TestSyncInferRoot_Error(t *testing.T) {
|
||||||
b := &bundle.Bundle{
|
b := &bundle.Bundle{
|
||||||
RootPath: "/tmp/some/dir",
|
BundleRootPath: "/tmp/some/dir",
|
||||||
Config: config.Root{
|
Config: config.Root{
|
||||||
Sync: config.Sync{
|
Sync: config.Sync{
|
||||||
Paths: []string{
|
Paths: []string{
|
||||||
|
@ -184,7 +185,7 @@ func TestSyncInferRoot_Error(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
bundletest.SetLocation(b, "sync.paths", "databricks.yml")
|
bundletest.SetLocation(b, "sync.paths", []dyn.Location{{File: "databricks.yml"}})
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
|
diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())
|
||||||
|
|
|
@ -4,97 +4,11 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"slices"
|
"slices"
|
||||||
|
|
||||||
"github.com/databricks/cli/bundle/libraries"
|
"github.com/databricks/cli/bundle/config/mutator/paths"
|
||||||
|
|
||||||
"github.com/databricks/cli/libs/dyn"
|
"github.com/databricks/cli/libs/dyn"
|
||||||
)
|
)
|
||||||
|
|
||||||
type jobRewritePattern struct {
|
|
||||||
pattern dyn.Pattern
|
|
||||||
fn rewriteFunc
|
|
||||||
skipRewrite func(string) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func noSkipRewrite(string) bool {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func rewritePatterns(t *translateContext, base dyn.Pattern) []jobRewritePattern {
|
|
||||||
return []jobRewritePattern{
|
|
||||||
{
|
|
||||||
base.Append(dyn.Key("notebook_task"), dyn.Key("notebook_path")),
|
|
||||||
t.translateNotebookPath,
|
|
||||||
noSkipRewrite,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
base.Append(dyn.Key("spark_python_task"), dyn.Key("python_file")),
|
|
||||||
t.translateFilePath,
|
|
||||||
noSkipRewrite,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
base.Append(dyn.Key("dbt_task"), dyn.Key("project_directory")),
|
|
||||||
t.translateDirectoryPath,
|
|
||||||
noSkipRewrite,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
base.Append(dyn.Key("sql_task"), dyn.Key("file"), dyn.Key("path")),
|
|
||||||
t.translateFilePath,
|
|
||||||
noSkipRewrite,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("whl")),
|
|
||||||
t.translateNoOp,
|
|
||||||
noSkipRewrite,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("jar")),
|
|
||||||
t.translateNoOp,
|
|
||||||
noSkipRewrite,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("requirements")),
|
|
||||||
t.translateFilePath,
|
|
||||||
noSkipRewrite,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *translateContext) jobRewritePatterns() []jobRewritePattern {
|
|
||||||
// Base pattern to match all tasks in all jobs.
|
|
||||||
base := dyn.NewPattern(
|
|
||||||
dyn.Key("resources"),
|
|
||||||
dyn.Key("jobs"),
|
|
||||||
dyn.AnyKey(),
|
|
||||||
dyn.Key("tasks"),
|
|
||||||
dyn.AnyIndex(),
|
|
||||||
)
|
|
||||||
|
|
||||||
// Compile list of patterns and their respective rewrite functions.
|
|
||||||
jobEnvironmentsPatterns := []jobRewritePattern{
|
|
||||||
{
|
|
||||||
dyn.NewPattern(
|
|
||||||
dyn.Key("resources"),
|
|
||||||
dyn.Key("jobs"),
|
|
||||||
dyn.AnyKey(),
|
|
||||||
dyn.Key("environments"),
|
|
||||||
dyn.AnyIndex(),
|
|
||||||
dyn.Key("spec"),
|
|
||||||
dyn.Key("dependencies"),
|
|
||||||
dyn.AnyIndex(),
|
|
||||||
),
|
|
||||||
t.translateNoOpWithPrefix,
|
|
||||||
func(s string) bool {
|
|
||||||
return !libraries.IsLibraryLocal(s)
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
taskPatterns := rewritePatterns(t, base)
|
|
||||||
forEachPatterns := rewritePatterns(t, base.Append(dyn.Key("for_each_task"), dyn.Key("task")))
|
|
||||||
allPatterns := append(taskPatterns, jobEnvironmentsPatterns...)
|
|
||||||
allPatterns = append(allPatterns, forEachPatterns...)
|
|
||||||
return allPatterns
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *translateContext) applyJobTranslations(v dyn.Value) (dyn.Value, error) {
|
func (t *translateContext) applyJobTranslations(v dyn.Value) (dyn.Value, error) {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
|
@@ -111,8 +25,7 @@ func (t *translateContext) applyJobTranslations(v dyn.Value) (dyn.Value, error)
 		}
 	}
 
-	for _, rewritePattern := range t.jobRewritePatterns() {
-		v, err = dyn.MapByPattern(v, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+	return paths.VisitJobPaths(v, func(p dyn.Path, kind paths.PathKind, v dyn.Value) (dyn.Value, error) {
 		key := p[2].Key()
 
 		// Skip path translation if the job is using git source.
|
||||||
return dyn.InvalidValue, fmt.Errorf("unable to determine directory for job %s: %w", key, err)
|
return dyn.InvalidValue, fmt.Errorf("unable to determine directory for job %s: %w", key, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
sv := v.MustString()
|
rewritePatternFn, err := t.getRewritePatternFn(kind)
|
||||||
if rewritePattern.skipRewrite(sv) {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
return t.rewriteRelativeTo(p, v, rewritePattern.fn, dir, fallback[key])
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return dyn.InvalidValue, err
|
return dyn.InvalidValue, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return t.rewriteRelativeTo(p, v, rewritePatternFn, dir, fallback[key])
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *translateContext) getRewritePatternFn(kind paths.PathKind) (rewriteFunc, error) {
|
||||||
|
switch kind {
|
||||||
|
case paths.PathKindLibrary:
|
||||||
|
return t.translateNoOp, nil
|
||||||
|
case paths.PathKindNotebook:
|
||||||
|
return t.translateNotebookPath, nil
|
||||||
|
case paths.PathKindWorkspaceFile:
|
||||||
|
return t.translateFilePath, nil
|
||||||
|
case paths.PathKindDirectory:
|
||||||
|
return t.translateDirectoryPath, nil
|
||||||
|
case paths.PathKindWithPrefix:
|
||||||
|
return t.translateNoOpWithPrefix, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return v, nil
|
return nil, fmt.Errorf("unsupported path kind: %d", kind)
|
||||||
}
|
}
|
||||||
|
|
|
@ -82,7 +82,7 @@ func TestTranslatePathsSkippedWithGitSource(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
|
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})
|
||||||
|
|
||||||
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
||||||
require.NoError(t, diags.Error())
|
require.NoError(t, diags.Error())
|
||||||
|
@ -210,7 +210,7 @@ func TestTranslatePaths(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
|
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})
|
||||||
|
|
||||||
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
||||||
require.NoError(t, diags.Error())
|
require.NoError(t, diags.Error())
|
||||||
|
@ -346,8 +346,8 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml"))
|
bundletest.SetLocation(b, "resources.jobs", []dyn.Location{{File: filepath.Join(dir, "job/resource.yml")}})
|
||||||
bundletest.SetLocation(b, "resources.pipelines", filepath.Join(dir, "pipeline/resource.yml"))
|
bundletest.SetLocation(b, "resources.pipelines", []dyn.Location{{File: filepath.Join(dir, "pipeline/resource.yml")}})
|
||||||
|
|
||||||
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
||||||
require.NoError(t, diags.Error())
|
require.NoError(t, diags.Error())
|
||||||
|
@ -408,7 +408,7 @@ func TestTranslatePathsOutsideSyncRoot(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
bundletest.SetLocation(b, ".", filepath.Join(dir, "../resource.yml"))
|
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "../resource.yml")}})
|
||||||
|
|
||||||
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
||||||
assert.ErrorContains(t, diags.Error(), "is not contained in sync root path")
|
assert.ErrorContains(t, diags.Error(), "is not contained in sync root path")
|
||||||
|
@ -439,7 +439,7 @@ func TestJobNotebookDoesNotExistError(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))
|
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})
|
||||||
|
|
||||||
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
||||||
assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found")
|
assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found")
|
||||||
|
@ -470,7 +470,7 @@ func TestJobFileDoesNotExistError(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))
|
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})
|
||||||
|
|
||||||
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
||||||
assert.EqualError(t, diags.Error(), "file ./doesnt_exist.py not found")
|
assert.EqualError(t, diags.Error(), "file ./doesnt_exist.py not found")
|
||||||
|
@ -501,7 +501,7 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))
|
bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})
|
||||||
|
|
||||||
diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
|
 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found")

@@ -532,7 +532,7 @@ func TestPipelineFileDoesNotExistError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.EqualError(t, diags.Error(), "file ./doesnt_exist.py not found")

@@ -567,7 +567,7 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.ErrorContains(t, diags.Error(), `expected a file for "resources.jobs.job.tasks[0].spark_python_task.python_file" but got a notebook`)

@@ -602,7 +602,7 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.ErrorContains(t, diags.Error(), `expected a notebook for "resources.jobs.job.tasks[0].notebook_task.notebook_path" but got a file`)

@@ -637,7 +637,7 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.ErrorContains(t, diags.Error(), `expected a notebook for "resources.pipelines.pipeline.libraries[0].notebook.path" but got a file`)

@@ -672,7 +672,7 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.ErrorContains(t, diags.Error(), `expected a file for "resources.pipelines.pipeline.libraries[0].file.path" but got a notebook`)

@@ -710,7 +710,7 @@ func TestTranslatePathJobEnvironments(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml"))
+	bundletest.SetLocation(b, "resources.jobs", []dyn.Location{{File: filepath.Join(dir, "job/resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	require.NoError(t, diags.Error())

@@ -753,8 +753,8 @@ func TestTranslatePathWithComplexVariables(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "variables", filepath.Join(dir, "variables/variables.yml"))
-	bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml"))
+	bundletest.SetLocation(b, "variables", []dyn.Location{{File: filepath.Join(dir, "variables/variables.yml")}})
+	bundletest.SetLocation(b, "resources.jobs", []dyn.Location{{File: filepath.Join(dir, "job/resource.yml")}})

 	ctx := context.Background()
 	// Assign the variables to the dynamic configuration.
@@ -19,6 +19,7 @@ type Resources struct {
 	RegisteredModels map[string]*resources.RegisteredModel `json:"registered_models,omitempty"`
 	QualityMonitors  map[string]*resources.QualityMonitor  `json:"quality_monitors,omitempty"`
 	Schemas          map[string]*resources.Schema          `json:"schemas,omitempty"`
+	Clusters         map[string]*resources.Cluster         `json:"clusters,omitempty"`
 }

 type ConfigResource interface {
@@ -0,0 +1,39 @@
+package resources
+
+import (
+	"context"
+
+	"github.com/databricks/cli/libs/log"
+	"github.com/databricks/databricks-sdk-go"
+	"github.com/databricks/databricks-sdk-go/marshal"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+)
+
+type Cluster struct {
+	ID             string         `json:"id,omitempty" bundle:"readonly"`
+	Permissions    []Permission   `json:"permissions,omitempty"`
+	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
+
+	*compute.ClusterSpec
+}
+
+func (s *Cluster) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s Cluster) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+func (s *Cluster) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
+	_, err := w.Clusters.GetByClusterId(ctx, id)
+	if err != nil {
+		log.Debugf(ctx, "cluster %s does not exist", id)
+		return false, err
+	}
+	return true, nil
+}
+
+func (s *Cluster) TerraformResourceName() string {
+	return "databricks_cluster"
+}
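For illustration only, a minimal sketch of how the new cluster resource type composes a compute.ClusterSpec, mirroring the shape used by the converter tests further down; the resource key and spec values here are placeholders, and the import paths are assumed from the rest of this change.

	package main

	import (
		"fmt"

		"github.com/databricks/cli/bundle/config"
		"github.com/databricks/cli/bundle/config/resources"
		"github.com/databricks/databricks-sdk-go/service/compute"
	)

	func main() {
		// Declare a cluster resource under the new Resources.Clusters map.
		r := config.Resources{
			Clusters: map[string]*resources.Cluster{
				"my_cluster": { // illustrative key
					ClusterSpec: &compute.ClusterSpec{
						ClusterName:  "my_cluster",
						SparkVersion: "13.3.x-scala2.12",
						NumWorkers:   3,
					},
				},
			},
		}
		// Fields of the embedded *compute.ClusterSpec are promoted onto Cluster.
		fmt.Println(r.Clusters["my_cluster"].ClusterName)
	}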
@@ -366,9 +366,9 @@ func (r *Root) MergeTargetOverrides(name string) error {
 		}
 	}

-	// Merge `compute_id`. This field must be overwritten if set, not merged.
-	if v := target.Get("compute_id"); v.Kind() != dyn.KindInvalid {
-		root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("compute_id")), v)
+	// Merge `cluster_id`. This field must be overwritten if set, not merged.
+	if v := target.Get("cluster_id"); v.Kind() != dyn.KindInvalid {
+		root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("cluster_id")), v)
 		if err != nil {
 			return err
 		}
@@ -406,23 +406,45 @@ func (r *Root) MergeTargetOverrides(name string) error {
 	return r.updateWithDynamicValue(root)
 }

-var variableKeywords = []string{"default", "lookup"}
+var allowedVariableDefinitions = []([]string){
+	{"default", "type", "description"},
+	{"default", "type"},
+	{"default", "description"},
+	{"lookup", "description"},
+	{"default"},
+	{"lookup"},
+}

 // isFullVariableOverrideDef checks if the given value is a full syntax varaible override.
-// A full syntax variable override is a map with only one of the following
-// keys: "default", "lookup".
+// A full syntax variable override is a map with either 1 of 2 keys.
+// If it's 2 keys, the keys should be "default" and "type".
+// If it's 1 key, the key should be one of the following keys: "default", "lookup".
 func isFullVariableOverrideDef(v dyn.Value) bool {
 	mv, ok := v.AsMap()
 	if !ok {
 		return false
 	}

-	if mv.Len() != 1 {
+	// If the map has more than 3 keys, it is not a full variable override.
+	if mv.Len() > 3 {
 		return false
 	}

-	for _, keyword := range variableKeywords {
-		if _, ok := mv.GetByString(keyword); ok {
+	for _, keys := range allowedVariableDefinitions {
+		if len(keys) != mv.Len() {
+			continue
+		}
+
+		// Check if the keys are the same.
+		match := true
+		for _, key := range keys {
+			if _, ok := mv.GetByString(key); !ok {
+				match = false
+				break
+			}
+		}
+
+		if match {
 			return true
 		}
 	}
@@ -6,6 +6,7 @@ import (
 	"testing"

 	"github.com/databricks/cli/bundle/config/variable"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )

@@ -169,3 +170,87 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) {
 	assert.Equal(t, "complex var", root.Variables["complex"].Description)
 }
+
+func TestIsFullVariableOverrideDef(t *testing.T) {
+	testCases := []struct {
+		value    dyn.Value
+		expected bool
+	}{
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type":        dyn.V("string"),
+				"default":     dyn.V("foo"),
+				"description": dyn.V("foo var"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type":        dyn.V("string"),
+				"lookup":      dyn.V("foo"),
+				"description": dyn.V("foo var"),
+			}),
+			expected: false,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type":    dyn.V("string"),
+				"default": dyn.V("foo"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type":   dyn.V("string"),
+				"lookup": dyn.V("foo"),
+			}),
+			expected: false,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"description": dyn.V("string"),
+				"default":     dyn.V("foo"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"description": dyn.V("string"),
+				"lookup":      dyn.V("foo"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"default": dyn.V("foo"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"lookup": dyn.V("foo"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type": dyn.V("string"),
+			}),
+			expected: false,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type":        dyn.V("string"),
+				"default":     dyn.V("foo"),
+				"description": dyn.V("foo var"),
+				"lookup":      dyn.V("foo"),
+			}),
+			expected: false,
+		},
+	}
+
+	for i, tc := range testCases {
+		assert.Equal(t, tc.expected, isFullVariableOverrideDef(tc.value), "test case %d", i)
+	}
+}
@@ -24,8 +24,11 @@ type Target struct {
 	// name prefix of deployed resources.
 	Presets Presets `json:"presets,omitempty"`

-	// Overrides the compute used for jobs and other supported assets.
-	ComputeID string `json:"compute_id,omitempty"`
+	// DEPRECATED: Overrides the compute used for jobs and other supported assets.
+	ComputeId string `json:"compute_id,omitempty"`
+
+	// Overrides the cluster used for jobs and other supported assets.
+	ClusterId string `json:"cluster_id,omitempty"`

 	Bundle *Bundle `json:"bundle,omitempty"`
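A hedged sketch of the renamed field in use: `compute_id` keeps its JSON tag on the now-deprecated ComputeId, while new target configurations are expected to set ClusterId (the cluster ID value below is a placeholder).

	package main

	import (
		"fmt"

		"github.com/databricks/cli/bundle/config"
	)

	func main() {
		// Prefer the new cluster_id field over the deprecated compute_id.
		t := config.Target{
			ClusterId: "1234-567890-abcde123", // placeholder cluster ID
		}
		fmt.Println(t.ClusterId)
	}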
@@ -0,0 +1,161 @@
+package validate
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+)
+
+// JobTaskClusterSpec validates that job tasks have cluster spec defined
+// if task requires a cluster
+func JobTaskClusterSpec() bundle.ReadOnlyMutator {
+	return &jobTaskClusterSpec{}
+}
+
+type jobTaskClusterSpec struct {
+}
+
+func (v *jobTaskClusterSpec) Name() string {
+	return "validate:job_task_cluster_spec"
+}
+
+func (v *jobTaskClusterSpec) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
+	diags := diag.Diagnostics{}
+
+	jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs"))
+
+	for resourceName, job := range rb.Config().Resources.Jobs {
+		resourcePath := jobsPath.Append(dyn.Key(resourceName))
+
+		for taskIndex, task := range job.Tasks {
+			taskPath := resourcePath.Append(dyn.Key("tasks"), dyn.Index(taskIndex))
+
+			diags = diags.Extend(validateJobTask(rb, task, taskPath))
+		}
+	}
+
+	return diags
+}
+
+func validateJobTask(rb bundle.ReadOnlyBundle, task jobs.Task, taskPath dyn.Path) diag.Diagnostics {
+	diags := diag.Diagnostics{}
+
+	var specified []string
+	var unspecified []string
+
+	if task.JobClusterKey != "" {
+		specified = append(specified, "job_cluster_key")
+	} else {
+		unspecified = append(unspecified, "job_cluster_key")
+	}
+
+	if task.EnvironmentKey != "" {
+		specified = append(specified, "environment_key")
+	} else {
+		unspecified = append(unspecified, "environment_key")
+	}
+
+	if task.ExistingClusterId != "" {
+		specified = append(specified, "existing_cluster_id")
+	} else {
+		unspecified = append(unspecified, "existing_cluster_id")
+	}
+
+	if task.NewCluster != nil {
+		specified = append(specified, "new_cluster")
+	} else {
+		unspecified = append(unspecified, "new_cluster")
+	}
+
+	if task.ForEachTask != nil {
+		forEachTaskPath := taskPath.Append(dyn.Key("for_each_task"), dyn.Key("task"))
+
+		diags = diags.Extend(validateJobTask(rb, task.ForEachTask.Task, forEachTaskPath))
+	}
+
+	if isComputeTask(task) && len(specified) == 0 {
+		if task.NotebookTask != nil {
+			// notebook tasks without cluster spec will use notebook environment
+		} else {
+			// path might be not very helpful, adding user-specified task key clarifies the context
+			detail := fmt.Sprintf(
+				"Task %q requires a cluster or an environment to run.\nSpecify one of the following fields: %s.",
+				task.TaskKey,
+				strings.Join(unspecified, ", "),
+			)
+
+			diags = diags.Append(diag.Diagnostic{
+				Severity:  diag.Error,
+				Summary:   "Missing required cluster or environment settings",
+				Detail:    detail,
+				Locations: rb.Config().GetLocations(taskPath.String()),
+				Paths:     []dyn.Path{taskPath},
+			})
+		}
+	}
+
+	return diags
+}
+
+// isComputeTask returns true if the task runs on a cluster or serverless GC
+func isComputeTask(task jobs.Task) bool {
+	if task.NotebookTask != nil {
+		// if warehouse_id is set, it's SQL notebook that doesn't need cluster or serverless GC
+		if task.NotebookTask.WarehouseId != "" {
+			return false
+		} else {
+			// task settings don't require specifying a cluster/serverless GC, but task itself can run on one
+			// we handle that case separately in validateJobTask
+			return true
+		}
+	}
+
+	if task.PythonWheelTask != nil {
+		return true
+	}
+
+	if task.DbtTask != nil {
+		return true
+	}
+
+	if task.SparkJarTask != nil {
+		return true
+	}
+
+	if task.SparkSubmitTask != nil {
+		return true
+	}
+
+	if task.SparkPythonTask != nil {
+		return true
+	}
+
+	if task.SqlTask != nil {
+		return false
+	}
+
+	if task.PipelineTask != nil {
+		// while pipelines use clusters, pipeline tasks don't, they only trigger pipelines
+		return false
+	}
+
+	if task.RunJobTask != nil {
+		return false
+	}
+
+	if task.ConditionTask != nil {
+		return false
+	}
+
+	// for each task doesn't use clusters, underlying task(s) can though
+	if task.ForEachTask != nil {
+		return false
+	}
+
+	return false
+}
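A minimal sketch of wiring this read-only mutator into a bundle, following the same ApplyReadOnly call used by the test file that follows; the validate import path and the "job1"/"my_task" names are assumptions for illustration.

	package main

	import (
		"context"
		"fmt"

		"github.com/databricks/cli/bundle"
		"github.com/databricks/cli/bundle/config"
		"github.com/databricks/cli/bundle/config/resources"
		"github.com/databricks/cli/bundle/config/validate"
		"github.com/databricks/databricks-sdk-go/service/jobs"
	)

	func main() {
		// A Python wheel task that needs compute but specifies none of the four fields.
		b := &bundle.Bundle{
			Config: config.Root{
				Resources: config.Resources{
					Jobs: map[string]*resources.Job{
						"job1": {
							JobSettings: &jobs.JobSettings{
								Tasks: []jobs.Task{{TaskKey: "my_task", PythonWheelTask: &jobs.PythonWheelTask{}}},
							},
						},
					},
				},
			},
		}

		diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.JobTaskClusterSpec())
		for _, d := range diags {
			fmt.Println(d.Summary) // expected: "Missing required cluster or environment settings"
		}
	}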
@@ -0,0 +1,203 @@
+package validate
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestJobTaskClusterSpec(t *testing.T) {
+	expectedSummary := "Missing required cluster or environment settings"
+
+	type testCase struct {
+		name         string
+		task         jobs.Task
+		errorPath    string
+		errorDetail  string
+		errorSummary string
+	}
+
+	testCases := []testCase{
+		{
+			name: "valid notebook task",
+			task: jobs.Task{
+				// while a cluster is needed, it will use notebook environment to create one
+				NotebookTask: &jobs.NotebookTask{},
+			},
+		},
+		{
+			name: "valid notebook task (job_cluster_key)",
+			task: jobs.Task{
+				JobClusterKey: "cluster1",
+				NotebookTask:  &jobs.NotebookTask{},
+			},
+		},
+		{
+			name: "valid notebook task (new_cluster)",
+			task: jobs.Task{
+				NewCluster:   &compute.ClusterSpec{},
+				NotebookTask: &jobs.NotebookTask{},
+			},
+		},
+		{
+			name: "valid notebook task (existing_cluster_id)",
+			task: jobs.Task{
+				ExistingClusterId: "cluster1",
+				NotebookTask:      &jobs.NotebookTask{},
+			},
+		},
+		{
+			name: "valid SQL notebook task",
+			task: jobs.Task{
+				NotebookTask: &jobs.NotebookTask{
+					WarehouseId: "warehouse1",
+				},
+			},
+		},
+		{
+			name: "valid python wheel task",
+			task: jobs.Task{
+				JobClusterKey:   "cluster1",
+				PythonWheelTask: &jobs.PythonWheelTask{},
+			},
+		},
+		{
+			name: "valid python wheel task (environment_key)",
+			task: jobs.Task{
+				EnvironmentKey:  "environment1",
+				PythonWheelTask: &jobs.PythonWheelTask{},
+			},
+		},
+		{
+			name: "valid dbt task",
+			task: jobs.Task{
+				JobClusterKey: "cluster1",
+				DbtTask:       &jobs.DbtTask{},
+			},
+		},
+		{
+			name: "valid spark jar task",
+			task: jobs.Task{
+				JobClusterKey: "cluster1",
+				SparkJarTask:  &jobs.SparkJarTask{},
+			},
+		},
+		{
+			name: "valid spark submit",
+			task: jobs.Task{
+				NewCluster:      &compute.ClusterSpec{},
+				SparkSubmitTask: &jobs.SparkSubmitTask{},
+			},
+		},
+		{
+			name: "valid spark python task",
+			task: jobs.Task{
+				JobClusterKey:   "cluster1",
+				SparkPythonTask: &jobs.SparkPythonTask{},
+			},
+		},
+		{
+			name: "valid SQL task",
+			task: jobs.Task{
+				SqlTask: &jobs.SqlTask{},
+			},
+		},
+		{
+			name: "valid pipeline task",
+			task: jobs.Task{
+				PipelineTask: &jobs.PipelineTask{},
+			},
+		},
+		{
+			name: "valid run job task",
+			task: jobs.Task{
+				RunJobTask: &jobs.RunJobTask{},
+			},
+		},
+		{
+			name: "valid condition task",
+			task: jobs.Task{
+				ConditionTask: &jobs.ConditionTask{},
+			},
+		},
+		{
+			name: "valid for each task",
+			task: jobs.Task{
+				ForEachTask: &jobs.ForEachTask{
+					Task: jobs.Task{
+						JobClusterKey: "cluster1",
+						NotebookTask:  &jobs.NotebookTask{},
+					},
+				},
+			},
+		},
+		{
+			name: "invalid python wheel task",
+			task: jobs.Task{
+				PythonWheelTask: &jobs.PythonWheelTask{},
+				TaskKey:         "my_task",
+			},
+			errorPath: "resources.jobs.job1.tasks[0]",
+			errorDetail: `Task "my_task" requires a cluster or an environment to run.
+Specify one of the following fields: job_cluster_key, environment_key, existing_cluster_id, new_cluster.`,
+			errorSummary: expectedSummary,
+		},
+		{
+			name: "invalid for each task",
+			task: jobs.Task{
+				ForEachTask: &jobs.ForEachTask{
+					Task: jobs.Task{
+						PythonWheelTask: &jobs.PythonWheelTask{},
+						TaskKey:         "my_task",
+					},
+				},
+			},
+			errorPath: "resources.jobs.job1.tasks[0].for_each_task.task",
+			errorDetail: `Task "my_task" requires a cluster or an environment to run.
+Specify one of the following fields: job_cluster_key, environment_key, existing_cluster_id, new_cluster.`,
+			errorSummary: expectedSummary,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			job := &resources.Job{
+				JobSettings: &jobs.JobSettings{
+					Tasks: []jobs.Task{tc.task},
+				},
+			}
+
+			b := createBundle(map[string]*resources.Job{"job1": job})
+			diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobTaskClusterSpec())
+
+			if tc.errorPath != "" || tc.errorDetail != "" || tc.errorSummary != "" {
+				assert.Len(t, diags, 1)
+				assert.Len(t, diags[0].Paths, 1)
+
+				diag := diags[0]
+
+				assert.Equal(t, tc.errorPath, diag.Paths[0].String())
+				assert.Equal(t, tc.errorSummary, diag.Summary)
+				assert.Equal(t, tc.errorDetail, diag.Detail)
+			} else {
+				assert.ElementsMatch(t, []string{}, diags)
+			}
+		})
+	}
+}
+
+func createBundle(jobs map[string]*resources.Job) *bundle.Bundle {
+	return &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: jobs,
+			},
+		},
+	}
+}
@@ -34,6 +34,7 @@ func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
 		JobClusterKeyDefined(),
 		FilesToSync(),
 		ValidateSyncPatterns(),
+		JobTaskClusterSpec(),
 	))
 }
@@ -47,13 +47,18 @@ type Workspace struct {

 	// Remote workspace base path for deployment state, for artifacts, as synchronization target.
 	// This defaults to "~/.bundle/${bundle.name}/${bundle.target}" where "~" expands to
-	// the current user's home directory in the workspace (e.g. `/Users/jane@doe.com`).
+	// the current user's home directory in the workspace (e.g. `/Workspace/Users/jane@doe.com`).
 	RootPath string `json:"root_path,omitempty"`

 	// Remote workspace path to synchronize local files to.
 	// This defaults to "${workspace.root}/files".
 	FilePath string `json:"file_path,omitempty"`

+	// Remote workspace path for resources with a presence in the workspace.
+	// These are kept outside [FilePath] to avoid potential naming collisions.
+	// This defaults to "${workspace.root}/resources".
+	ResourcePath string `json:"resource_path,omitempty"`
+
 	// Remote workspace path for build artifacts.
 	// This defaults to "${workspace.root}/artifacts".
 	ArtifactPath string `json:"artifact_path,omitempty"`
@@ -8,9 +8,12 @@ import (
 	"github.com/databricks/cli/libs/cmdio"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
+	"github.com/databricks/cli/libs/sync"
 )

-type upload struct{}
+type upload struct {
+	outputHandler sync.OutputHandler
+}

 func (m *upload) Name() string {
 	return "files.Upload"

@@ -18,11 +21,18 @@ func (m *upload) Name() string {

 func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	cmdio.LogString(ctx, fmt.Sprintf("Uploading bundle files to %s...", b.Config.Workspace.FilePath))
-	sync, err := GetSync(ctx, bundle.ReadOnly(b))
+	opts, err := GetSyncOptions(ctx, bundle.ReadOnly(b))
 	if err != nil {
 		return diag.FromErr(err)
 	}

+	opts.OutputHandler = m.outputHandler
+	sync, err := sync.New(ctx, *opts)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	defer sync.Close()
+
 	b.Files, err = sync.RunOnce(ctx)
 	if err != nil {
 		return diag.FromErr(err)

@@ -32,6 +42,6 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	return nil
 }

-func Upload() bundle.Mutator {
-	return &upload{}
+func Upload(outputHandler sync.OutputHandler) bundle.Mutator {
+	return &upload{outputHandler}
 }
@@ -40,7 +40,7 @@ func (m *compute) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
 		// Compute config file path the job is defined in, relative to the bundle
 		// root
 		l := b.Config.GetLocation("resources.jobs." + name)
-		relativePath, err := filepath.Rel(b.RootPath, l.File)
+		relativePath, err := filepath.Rel(b.BundleRootPath, l.File)
 		if err != nil {
 			return diag.Errorf("failed to compute relative path for job %s: %v", name, err)
 		}
@@ -9,6 +9,7 @@ import (
 	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/cli/bundle/internal/bundletest"
 	"github.com/databricks/cli/bundle/metadata"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

@@ -55,9 +56,9 @@ func TestComputeMetadataMutator(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "resources.jobs.my-job-1", "a/b/c")
-	bundletest.SetLocation(b, "resources.jobs.my-job-2", "d/e/f")
-	bundletest.SetLocation(b, "resources.pipelines.my-pipeline", "abc")
+	bundletest.SetLocation(b, "resources.jobs.my-job-1", []dyn.Location{{File: "a/b/c"}})
+	bundletest.SetLocation(b, "resources.jobs.my-job-2", []dyn.Location{{File: "d/e/f"}})
+	bundletest.SetLocation(b, "resources.pipelines.my-pipeline", []dyn.Location{{File: "abc"}})

 	expectedMetadata := metadata.Metadata{
 		Version: metadata.Version,
@@ -62,7 +62,7 @@ func testStatePull(t *testing.T, opts statePullOpts) {

 		tmpDir := t.TempDir()
 		b := &bundle.Bundle{
-			RootPath:   tmpDir,
+			BundleRootPath: tmpDir,
 			BundleRoot: vfs.MustNew(tmpDir),

 			SyncRootPath: tmpDir,

@@ -259,7 +259,7 @@ func TestStatePullNoState(t *testing.T) {
 	}}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",

@@ -447,7 +447,7 @@ func TestStatePullNewerDeploymentStateVersion(t *testing.T) {
 	}}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",
@@ -10,6 +10,8 @@ import (
 	"github.com/databricks/cli/libs/log"
 )

+const MaxStateFileSize = 10 * 1024 * 1024 // 10MB
+
 type statePush struct {
 	filerFactory FilerFactory
 }

@@ -35,6 +37,17 @@ func (s *statePush) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	}
 	defer local.Close()

+	if !b.Config.Bundle.Force {
+		state, err := local.Stat()
+		if err != nil {
+			return diag.FromErr(err)
+		}
+
+		if state.Size() > MaxStateFileSize {
+			return diag.Errorf("Deployment state file size exceeds the maximum allowed size of %d bytes. Please reduce the number of resources in your bundle, split your bundle into multiple or re-run the command with --force flag.", MaxStateFileSize)
+		}
+	}
+
 	log.Infof(ctx, "Writing local deployment state file to remote state directory")
 	err = f.Write(ctx, DeploymentStateFileName, local, filer.CreateParentDirectories, filer.OverwriteIfExists)
 	if err != nil {
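For context, 10 * 1024 * 1024 is 10,485,760 bytes, which is the number that appears in the Terraform state test later in this change. A standalone sketch of the same size-guard pattern, using os.Stat on an illustrative file name rather than the mutator's filer handle:

	package main

	import (
		"fmt"
		"os"
	)

	const maxStateFileSize = 10 * 1024 * 1024 // 10 MB, mirroring MaxStateFileSize above

	func main() {
		// Same guard idea: reject oversized state files unless a force flag is set.
		fi, err := os.Stat("deployment-state.json") // illustrative path
		if err != nil {
			fmt.Println(err)
			return
		}
		if fi.Size() > maxStateFileSize {
			fmt.Printf("state file is %d bytes, over the %d byte limit\n", fi.Size(), maxStateFileSize)
		}
	}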
@@ -45,7 +45,7 @@ func TestStatePush(t *testing.T) {
 	}}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",

@@ -27,7 +27,7 @@ func setupBundleForStateUpdate(t *testing.T) *bundle.Bundle {
 	require.NoError(t, err)

 	return &bundle.Bundle{
-		RootPath: tmpDir,
+		BundleRootPath: tmpDir,
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"sort"

 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"

@@ -82,6 +83,10 @@ func BundleToTerraform(config *config.Root) *schema.Root {
 		conv(src, &dst)

 		if src.JobSettings != nil {
+			sort.Slice(src.JobSettings.Tasks, func(i, j int) bool {
+				return src.JobSettings.Tasks[i].TaskKey < src.JobSettings.Tasks[j].TaskKey
+			})
+
 			for _, v := range src.Tasks {
 				var t schema.ResourceJobTask
 				conv(v, &t)

@@ -231,6 +236,13 @@ func BundleToTerraform(config *config.Root) *schema.Root {
 		tfroot.Resource.QualityMonitor[k] = &dst
 	}

+	for k, src := range config.Resources.Clusters {
+		noResources = false
+		var dst schema.ResourceCluster
+		conv(src, &dst)
+		tfroot.Resource.Cluster[k] = &dst
+	}
+
 	// We explicitly set "resource" to nil to omit it from a JSON encoding.
 	// This is required because the terraform CLI requires >= 1 resources defined
 	// if the "resource" property is used in a .tf.json file.

@@ -394,6 +406,16 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
 			}
 			cur.ID = instance.Attributes.ID
 			config.Resources.Schemas[resource.Name] = cur
+		case "databricks_cluster":
+			if config.Resources.Clusters == nil {
+				config.Resources.Clusters = make(map[string]*resources.Cluster)
+			}
+			cur := config.Resources.Clusters[resource.Name]
+			if cur == nil {
+				cur = &resources.Cluster{ModifiedStatus: resources.ModifiedStatusDeleted}
+			}
+			cur.ID = instance.Attributes.ID
+			config.Resources.Clusters[resource.Name] = cur
 		case "databricks_permissions":
 		case "databricks_grants":
 			// Ignore; no need to pull these back into the configuration.

@@ -443,6 +465,11 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
 			src.ModifiedStatus = resources.ModifiedStatusCreated
 		}
 	}
+	for _, src := range config.Resources.Clusters {
+		if src.ModifiedStatus == "" && src.ID == "" {
+			src.ModifiedStatus = resources.ModifiedStatusCreated
+		}
+	}

 	return nil
 }
@@ -663,6 +663,14 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
 				{Attributes: stateInstanceAttributes{ID: "1"}},
 			},
 		},
+		{
+			Type: "databricks_cluster",
+			Mode: "managed",
+			Name: "test_cluster",
+			Instances: []stateResourceInstance{
+				{Attributes: stateInstanceAttributes{ID: "1"}},
+			},
+		},
 	}
 	err := TerraformToBundle(&tfState, &config)

@@ -692,6 +700,9 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
 	assert.Equal(t, "1", config.Resources.Schemas["test_schema"].ID)
 	assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Schemas["test_schema"].ModifiedStatus)

+	assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID)
+	assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Clusters["test_cluster"].ModifiedStatus)
+
 	AssertFullResourceCoverage(t, &config)
 }

@@ -754,6 +765,13 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
 				},
 			},
 		},
+		Clusters: map[string]*resources.Cluster{
+			"test_cluster": {
+				ClusterSpec: &compute.ClusterSpec{
+					ClusterName: "test_cluster",
+				},
+			},
+		},
 	}
 	var tfState = resourcesState{

@@ -786,6 +804,9 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
 	assert.Equal(t, "", config.Resources.Schemas["test_schema"].ID)
 	assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Schemas["test_schema"].ModifiedStatus)

+	assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ID)
+	assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster"].ModifiedStatus)
+
 	AssertFullResourceCoverage(t, &config)
 }

@@ -888,6 +909,18 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
 				},
 			},
 		},
+		Clusters: map[string]*resources.Cluster{
+			"test_cluster": {
+				ClusterSpec: &compute.ClusterSpec{
+					ClusterName: "test_cluster",
+				},
+			},
+			"test_cluster_new": {
+				ClusterSpec: &compute.ClusterSpec{
+					ClusterName: "test_cluster_new",
+				},
+			},
+		},
 	}
 	var tfState = resourcesState{

@@ -1020,6 +1053,22 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
 				{Attributes: stateInstanceAttributes{ID: "2"}},
 			},
 		},
+		{
+			Type: "databricks_cluster",
+			Mode: "managed",
+			Name: "test_cluster",
+			Instances: []stateResourceInstance{
+				{Attributes: stateInstanceAttributes{ID: "1"}},
+			},
+		},
+		{
+			Type: "databricks_cluster",
+			Mode: "managed",
+			Name: "test_cluster_old",
+			Instances: []stateResourceInstance{
+				{Attributes: stateInstanceAttributes{ID: "2"}},
+			},
+		},
 	}
 	err := TerraformToBundle(&tfState, &config)

@@ -1081,6 +1130,13 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
 	assert.Equal(t, "", config.Resources.Schemas["test_schema_new"].ID)
 	assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Schemas["test_schema_new"].ModifiedStatus)

+	assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID)
+	assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ModifiedStatus)
+	assert.Equal(t, "2", config.Resources.Clusters["test_cluster_old"].ID)
+	assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Clusters["test_cluster_old"].ModifiedStatus)
+	assert.Equal(t, "", config.Resources.Clusters["test_cluster_new"].ID)
+	assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster_new"].ModifiedStatus)
+
 	AssertFullResourceCoverage(t, &config)
 }
@@ -33,7 +33,7 @@ func TestInitEnvironmentVariables(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -60,7 +60,7 @@ func TestSetTempDirEnvVarsForUnixWithTmpDirSet(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -88,7 +88,7 @@ func TestSetTempDirEnvVarsForUnixWithTmpDirNotSet(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -114,7 +114,7 @@ func TestSetTempDirEnvVarsForWindowWithAllTmpDirEnvVarsSet(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -144,7 +144,7 @@ func TestSetTempDirEnvVarsForWindowWithUserProfileAndTempSet(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -174,7 +174,7 @@ func TestSetTempDirEnvVarsForWindowsWithoutAnyTempDirEnvVarsSet(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -202,7 +202,7 @@ func TestSetTempDirEnvVarsForWindowsWithoutAnyTempDirEnvVarsSet(t *testing.T) {

 func TestSetProxyEnvVars(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -250,7 +250,7 @@ func TestSetProxyEnvVars(t *testing.T) {

 func TestSetUserAgentExtraEnvVar(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Experimental: &config.Experimental{
 				PyDABs: config.PyDABs{

@@ -333,7 +333,7 @@ func TestFindExecPathFromEnvironmentWithWrongVersion(t *testing.T) {
 	ctx := context.Background()
 	m := &initialize{}
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -357,7 +357,7 @@ func TestFindExecPathFromEnvironmentWithCorrectVersionAndNoBinary(t *testing.T)
 	ctx := context.Background()
 	m := &initialize{}
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -380,7 +380,7 @@ func TestFindExecPathFromEnvironmentWithCorrectVersionAndBinary(t *testing.T) {
 	ctx := context.Background()
 	m := &initialize{}
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -58,6 +58,8 @@ func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 			path = dyn.NewPath(dyn.Key("databricks_quality_monitor")).Append(path[2:]...)
 		case dyn.Key("schemas"):
 			path = dyn.NewPath(dyn.Key("databricks_schema")).Append(path[2:]...)
+		case dyn.Key("clusters"):
+			path = dyn.NewPath(dyn.Key("databricks_cluster")).Append(path[2:]...)
 		default:
 			// Trigger "key not found" for unknown resource types.
 			return dyn.GetByPath(root, path)

@@ -31,6 +31,7 @@ func TestInterpolate(t *testing.T) {
 					"other_model_serving":    "${resources.model_serving_endpoints.other_model_serving.id}",
 					"other_registered_model": "${resources.registered_models.other_registered_model.id}",
 					"other_schema":           "${resources.schemas.other_schema.id}",
+					"other_cluster":          "${resources.clusters.other_cluster.id}",
 				},
 				Tasks: []jobs.Task{
 					{

@@ -67,6 +68,7 @@ func TestInterpolate(t *testing.T) {
 	assert.Equal(t, "${databricks_model_serving.other_model_serving.id}", j.Tags["other_model_serving"])
 	assert.Equal(t, "${databricks_registered_model.other_registered_model.id}", j.Tags["other_registered_model"])
 	assert.Equal(t, "${databricks_schema.other_schema.id}", j.Tags["other_schema"])
+	assert.Equal(t, "${databricks_cluster.other_cluster.id}", j.Tags["other_cluster"])

 	m := b.Config.Resources.Models["my_model"]
 	assert.Equal(t, "my_model", m.Model.Name)
@@ -17,7 +17,7 @@ func TestLoadWithNoState(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -32,7 +32,7 @@ func mockStateFilerForPull(t *testing.T, contents map[string]any, merr error) filer.Filer {

 func statePullTestBundle(t *testing.T) *bundle.Bundle {
 	return &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",
@@ -47,6 +47,17 @@ func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	}
 	defer local.Close()

+	if !b.Config.Bundle.Force {
+		state, err := local.Stat()
+		if err != nil {
+			return diag.FromErr(err)
+		}
+
+		if state.Size() > deploy.MaxStateFileSize {
+			return diag.Errorf("Terraform state file size exceeds the maximum allowed size of %d bytes. Please reduce the number of resources in your bundle, split your bundle into multiple or re-run the command with --force flag", deploy.MaxStateFileSize)
+		}
+	}
+
 	// Upload state file from local cache directory to filer.
 	cmdio.LogString(ctx, "Updating deployment state...")
 	log.Infof(ctx, "Writing local state file to remote state directory")
@@ -3,6 +3,7 @@ package terraform
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"io"
 	"testing"

@@ -29,7 +30,7 @@ func mockStateFilerForPush(t *testing.T, fn func(body io.Reader)) filer.Filer {

 func statePushTestBundle(t *testing.T) *bundle.Bundle {
 	return &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",

@@ -59,3 +60,29 @@ func TestStatePush(t *testing.T) {
 	diags := bundle.Apply(ctx, b, m)
 	assert.NoError(t, diags.Error())
 }
+
+func TestStatePushLargeState(t *testing.T) {
+	mock := mockfiler.NewMockFiler(t)
+	m := &statePush{
+		identityFiler(mock),
+	}
+
+	ctx := context.Background()
+	b := statePushTestBundle(t)
+
+	largeState := map[string]any{}
+	for i := 0; i < 1000000; i++ {
+		largeState[fmt.Sprintf("field_%d", i)] = i
+	}
+
+	// Write a stale local state file.
+	writeLocalState(t, ctx, b, largeState)
+	diags := bundle.Apply(ctx, b, m)
+	assert.ErrorContains(t, diags.Error(), "Terraform state file size exceeds the maximum allowed size of 10485760 bytes. Please reduce the number of resources in your bundle, split your bundle into multiple or re-run the command with --force flag")
+
+	// Force the write.
+	b = statePushTestBundle(t)
+	b.Config.Bundle.Force = true
+	diags = bundle.Apply(ctx, b, m)
+	assert.NoError(t, diags.Error())
+}
@@ -0,0 +1,52 @@
+package tfdyn
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/databricks/cli/bundle/internal/tf/schema"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/cli/libs/dyn/convert"
+	"github.com/databricks/cli/libs/log"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+)
+
+func convertClusterResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) {
+	// Normalize the output value to the target schema.
+	vout, diags := convert.Normalize(compute.ClusterSpec{}, vin)
+	for _, diag := range diags {
+		log.Debugf(ctx, "cluster normalization diagnostic: %s", diag.Summary)
+	}
+
+	return vout, nil
+}
+
+type clusterConverter struct{}
+
+func (clusterConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error {
+	vout, err := convertClusterResource(ctx, vin)
+	if err != nil {
+		return err
+	}
+
+	// We always set no_wait as it allows DABs not to wait for cluster to be started.
+	vout, err = dyn.Set(vout, "no_wait", dyn.V(true))
+	if err != nil {
+		return err
+	}
+
+	// Add the converted resource to the output.
+	out.Cluster[key] = vout.AsAny()
+
+	// Configure permissions for this resource.
+	if permissions := convertPermissionsResource(ctx, vin); permissions != nil {
+		permissions.JobId = fmt.Sprintf("${databricks_cluster.%s.id}", key)
+		out.Permissions["cluster_"+key] = permissions
+	}
+
+	return nil
+}
+
+func init() {
+	registerConverter("clusters", clusterConverter{})
+}
@@ -0,0 +1,97 @@
+package tfdyn
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/bundle/internal/tf/schema"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/cli/libs/dyn/convert"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestConvertCluster(t *testing.T) {
+	var src = resources.Cluster{
+		ClusterSpec: &compute.ClusterSpec{
+			NumWorkers:   3,
+			SparkVersion: "13.3.x-scala2.12",
+			ClusterName:  "cluster",
+			SparkConf: map[string]string{
+				"spark.executor.memory": "2g",
+			},
+			AwsAttributes: &compute.AwsAttributes{
+				Availability: "ON_DEMAND",
+			},
+			AzureAttributes: &compute.AzureAttributes{
+				Availability: "SPOT",
+			},
+			DataSecurityMode: "USER_ISOLATION",
+			NodeTypeId:       "m5.xlarge",
+			Autoscale: &compute.AutoScale{
+				MinWorkers: 1,
+				MaxWorkers: 10,
+			},
+		},
+
+		Permissions: []resources.Permission{
+			{
+				Level:    "CAN_RUN",
+				UserName: "jack@gmail.com",
+			},
+			{
+				Level:                "CAN_MANAGE",
+				ServicePrincipalName: "sp",
+			},
+		},
+	}
+
+	vin, err := convert.FromTyped(src, dyn.NilValue)
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	out := schema.NewResources()
+	err = clusterConverter{}.Convert(ctx, "my_cluster", vin, out)
+	require.NoError(t, err)
+
+	cluster := out.Cluster["my_cluster"]
+	assert.Equal(t, map[string]any{
+		"num_workers":   int64(3),
+		"spark_version": "13.3.x-scala2.12",
+		"cluster_name":  "cluster",
+		"spark_conf": map[string]any{
+			"spark.executor.memory": "2g",
+		},
+		"aws_attributes": map[string]any{
+			"availability": "ON_DEMAND",
+		},
+		"azure_attributes": map[string]any{
+			"availability": "SPOT",
+		},
+		"data_security_mode": "USER_ISOLATION",
+		"no_wait":            true,
+		"node_type_id":       "m5.xlarge",
+		"autoscale": map[string]any{
+			"min_workers": int64(1),
+			"max_workers": int64(10),
+		},
+	}, cluster)
+
+	// Assert equality on the permissions
+	assert.Equal(t, &schema.ResourcePermissions{
+		JobId: "${databricks_cluster.my_cluster.id}",
+		AccessControl: []schema.ResourcePermissionsAccessControl{
+			{
+				PermissionLevel: "CAN_RUN",
+				UserName:        "jack@gmail.com",
+			},
+			{
+				PermissionLevel:      "CAN_MANAGE",
+				ServicePrincipalName: "sp",
+			},
+		},
+	}, out.Permissions["cluster_my_cluster"])
+}
@@ -3,6 +3,7 @@ package tfdyn

import (
    "context"
    "fmt"
+   "sort"

    "github.com/databricks/cli/bundle/internal/tf/schema"
    "github.com/databricks/cli/libs/dyn"
@@ -19,8 +20,38 @@ func convertJobResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) {
        log.Debugf(ctx, "job normalization diagnostic: %s", diag.Summary)
    }

+   // Sort the tasks of each job in the bundle by task key. Sorting by task key
+   // ensures that the diff computed by Terraform is correct and avoids recreates.
+   // For more details see the NOTE at
+   // https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/job#example-usage
+   // and https://github.com/databricks/terraform-provider-databricks/issues/4011
+   // and https://github.com/databricks/cli/pull/1776
+   vout := vin
+   var err error
+   tasks, ok := vin.Get("tasks").AsSequence()
+   if ok {
+       sort.Slice(tasks, func(i, j int) bool {
+           // We sort the tasks by their task key. Tasks without task keys are ordered
+           // before tasks with task keys. We do not error for those tasks since the
+           // presence of a task_key is validated in the Jobs backend.
+           tk1, ok := tasks[i].Get("task_key").AsString()
+           if !ok {
+               return true
+           }
+           tk2, ok := tasks[j].Get("task_key").AsString()
+           if !ok {
+               return false
+           }
+           return tk1 < tk2
+       })
+       vout, err = dyn.Set(vin, "tasks", dyn.V(tasks))
+       if err != nil {
+           return dyn.InvalidValue, err
+       }
+   }
+
    // Modify top-level keys.
-   vout, err := renameKeys(vin, map[string]string{
+   vout, err = renameKeys(vout, map[string]string{
        "tasks":        "task",
        "job_clusters": "job_cluster",
        "parameters":   "parameter",
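The comparator above orders tasks that have no task_key before all keyed tasks and sorts the rest lexicographically. A standalone, hypothetical illustration of the same ordering rule using plain strings, with the empty string standing in for a missing key (not part of this change):

    package main

    import (
        "fmt"
        "sort"
    )

    func main() {
        // "" stands in for a task without a task_key; it sorts first,
        // the remaining keys sort lexicographically, as in the comparator above.
        keys := []string{"task_key_c", "", "task_key_a", "task_key_b"}
        sort.Slice(keys, func(i, j int) bool {
            if keys[i] == "" {
                return true
            }
            if keys[j] == "" {
                return false
            }
            return keys[i] < keys[j]
        })
        fmt.Println(keys) // [ task_key_a task_key_b task_key_c]
    }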
@@ -42,8 +42,8 @@ func TestConvertJob(t *testing.T) {
            },
            Tasks: []jobs.Task{
                {
-                   TaskKey:       "task_key",
-                   JobClusterKey: "job_cluster_key",
+                   TaskKey:       "task_key_b",
+                   JobClusterKey: "job_cluster_key_b",
                    Libraries: []compute.Library{
                        {
                            Pypi: &compute.PythonPyPiLibrary{
@@ -55,6 +55,17 @@ func TestConvertJob(t *testing.T) {
                        },
                    },
                },
+               {
+                   TaskKey:       "task_key_a",
+                   JobClusterKey: "job_cluster_key_a",
+               },
+               {
+                   TaskKey:       "task_key_c",
+                   JobClusterKey: "job_cluster_key_c",
+               },
+               {
+                   Description: "missing task key 😱",
+               },
            },
        },
        Permissions: []resources.Permission{
@@ -100,8 +111,15 @@ func TestConvertJob(t *testing.T) {
            },
            "task": []any{
                map[string]any{
-                   "task_key":        "task_key",
-                   "job_cluster_key": "job_cluster_key",
+                   "description": "missing task key 😱",
+               },
+               map[string]any{
+                   "task_key":        "task_key_a",
+                   "job_cluster_key": "job_cluster_key_a",
+               },
+               map[string]any{
+                   "task_key":        "task_key_b",
+                   "job_cluster_key": "job_cluster_key_b",
                    "library": []any{
                        map[string]any{
                            "pypi": map[string]any{
@@ -113,6 +131,10 @@ func TestConvertJob(t *testing.T) {
                        },
                    },
                },
+               map[string]any{
+                   "task_key":        "task_key_c",
+                   "job_cluster_key": "job_cluster_key_c",
+               },
            },
        }, out.Job["my_job"])
@@ -13,7 +13,7 @@ import (

func TestParseResourcesStateWithNoFile(t *testing.T) {
    b := &bundle.Bundle{
-       RootPath: t.TempDir(),
+       BundleRootPath: t.TempDir(),
        Config: config.Root{
            Bundle: config.Bundle{
                Target: "whatever",

@@ -31,7 +31,7 @@ func TestParseResourcesStateWithNoFile(t *testing.T) {
func TestParseResourcesStateWithExistingStateFile(t *testing.T) {
    ctx := context.Background()
    b := &bundle.Bundle{
-       RootPath: t.TempDir(),
+       BundleRootPath: t.TempDir(),
        Config: config.Root{
            Bundle: config.Bundle{
                Target: "whatever",
@@ -8,15 +8,13 @@ import (

// SetLocation sets the location of all values in the bundle to the given path.
// This is useful for testing where we need to associate configuration
// with the path it is loaded from.
-func SetLocation(b *bundle.Bundle, prefix string, filePath string) {
+func SetLocation(b *bundle.Bundle, prefix string, locations []dyn.Location) {
    start := dyn.MustPathFromString(prefix)
    b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) {
        return dyn.Walk(root, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
            // If the path has the given prefix, set the location.
            if p.HasPrefix(start) {
-               return v.WithLocations([]dyn.Location{{
-                   File: filePath,
-               }}), nil
+               return v.WithLocations(locations), nil
            }

            // The path is not nested under the given prefix.
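Call sites now pass the locations slice directly. As a usage sketch of the new signature (the first line mirrors the updated test call sites later in this diff; the second is a hypothetical example with made-up file names showing that several locations can be attached to one prefix):

    // Single location, as used by the library tests below.
    bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

    // Hypothetical: multiple locations associated with the same prefix.
    bundletest.SetLocation(b, "resources.jobs", []dyn.Location{
        {File: "databricks.yml"},
        {File: "override.yml"},
    })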
@@ -51,9 +51,15 @@ func (r *root) Generate(path string) error {
}

func Run(ctx context.Context, schema *tfjson.ProviderSchema, path string) error {
    // Generate types for resources.
    var resources []*namedBlock
    for _, k := range sortKeys(schema.ResourceSchemas) {
+       // Skipping all plugin framework struct generation.
+       // TODO: This is a temporary fix, generation should be fixed in the future.
+       if strings.HasSuffix(k, "_pluginframework") {
+           continue
+       }
+
        v := schema.ResourceSchemas[k]
        b := &namedBlock{
            filePattern: "resource_%s.go",

@@ -71,6 +77,12 @@ func Run(ctx context.Context, schema *tfjson.ProviderSchema, path string) error
    // Generate types for data sources.
    var dataSources []*namedBlock
    for _, k := range sortKeys(schema.DataSourceSchemas) {
+       // Skipping all plugin framework struct generation.
+       // TODO: This is a temporary fix, generation should be fixed in the future.
+       if strings.HasSuffix(k, "_pluginframework") {
+           continue
+       }
+
        v := schema.DataSourceSchemas[k]
        b := &namedBlock{
            filePattern: "data_source_%s.go",
@@ -1,3 +1,3 @@
package schema

-const ProviderVersion = "1.50.0"
+const ProviderVersion = "1.52.0"
@@ -2,8 +2,16 @@

package schema

+type DataSourceClustersFilterBy struct {
+   ClusterSources []string `json:"cluster_sources,omitempty"`
+   ClusterStates  []string `json:"cluster_states,omitempty"`
+   IsPinned       bool     `json:"is_pinned,omitempty"`
+   PolicyId       string   `json:"policy_id,omitempty"`
+}
+
type DataSourceClusters struct {
    ClusterNameContains string                      `json:"cluster_name_contains,omitempty"`
    Id                  string                      `json:"id,omitempty"`
    Ids                 []string                    `json:"ids,omitempty"`
+   FilterBy            *DataSourceClustersFilterBy `json:"filter_by,omitempty"`
}
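For illustration, a hypothetical Go literal using the generated structs above; the field values are made up, and the new filter_by block is optional and nested under the data source:

    // Hypothetical usage of the generated types; values are illustrative only.
    ds := schema.DataSourceClusters{
        ClusterNameContains: "shared",
        FilterBy: &schema.DataSourceClustersFilterBy{
            ClusterStates: []string{"RUNNING"},
            IsPinned:      true,
        },
    }
    _ = ds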
@@ -19,6 +19,7 @@ type DataSourceExternalLocationExternalLocationInfo struct {
    CreatedBy      string `json:"created_by,omitempty"`
    CredentialId   string `json:"credential_id,omitempty"`
    CredentialName string `json:"credential_name,omitempty"`
+   Fallback       bool   `json:"fallback,omitempty"`
    IsolationMode  string `json:"isolation_mode,omitempty"`
    MetastoreId    string `json:"metastore_id,omitempty"`
    Name           string `json:"name,omitempty"`
@@ -18,12 +18,14 @@ type DataSourceShareObject struct {
    AddedBy                  string `json:"added_by,omitempty"`
    CdfEnabled               bool   `json:"cdf_enabled,omitempty"`
    Comment                  string `json:"comment,omitempty"`
+   Content                  string `json:"content,omitempty"`
    DataObjectType           string `json:"data_object_type"`
    HistoryDataSharingStatus string `json:"history_data_sharing_status,omitempty"`
    Name                     string `json:"name"`
    SharedAs                 string `json:"shared_as,omitempty"`
    StartVersion             int    `json:"start_version,omitempty"`
    Status                   string `json:"status,omitempty"`
+   StringSharedAs           string `json:"string_shared_as,omitempty"`
    Partition                []DataSourceShareObjectPartition `json:"partition,omitempty"`
}
@@ -2,20 +2,14 @@

package schema

-type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceEnablementDetails struct {
-   ForcedForComplianceMode           bool `json:"forced_for_compliance_mode,omitempty"`
-   UnavailableForDisabledEntitlement bool `json:"unavailable_for_disabled_entitlement,omitempty"`
-   UnavailableForNonEnterpriseTier   bool `json:"unavailable_for_non_enterprise_tier,omitempty"`
-}
-
type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime struct {
-   Hours   int `json:"hours,omitempty"`
-   Minutes int `json:"minutes,omitempty"`
+   Hours   int `json:"hours"`
+   Minutes int `json:"minutes"`
}

type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule struct {
-   DayOfWeek string `json:"day_of_week,omitempty"`
-   Frequency string `json:"frequency,omitempty"`
+   DayOfWeek string `json:"day_of_week"`
+   Frequency string `json:"frequency"`
    WindowStartTime *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime `json:"window_start_time,omitempty"`
}

@@ -25,9 +19,9 @@ type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspa
type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspace struct {
    CanToggle                       bool  `json:"can_toggle,omitempty"`
-   Enabled                         bool  `json:"enabled,omitempty"`
+   Enabled                         bool  `json:"enabled"`
+   EnablementDetails               []any `json:"enablement_details,omitempty"`
    RestartEvenIfNoUpdatesAvailable bool  `json:"restart_even_if_no_updates_available,omitempty"`
-   EnablementDetails *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceEnablementDetails `json:"enablement_details,omitempty"`
    MaintenanceWindow *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindow `json:"maintenance_window,omitempty"`
}
@@ -176,6 +176,7 @@ type ResourceCluster struct {
    IdempotencyToken string `json:"idempotency_token,omitempty"`
    InstancePoolId   string `json:"instance_pool_id,omitempty"`
    IsPinned         bool   `json:"is_pinned,omitempty"`
+   NoWait           bool   `json:"no_wait,omitempty"`
    NodeTypeId       string `json:"node_type_id,omitempty"`
    NumWorkers       int    `json:"num_workers,omitempty"`
    PolicyId         string `json:"policy_id,omitempty"`
@@ -3,8 +3,8 @@

package schema

type ResourceComplianceSecurityProfileWorkspaceSettingComplianceSecurityProfileWorkspace struct {
-   ComplianceStandards []string `json:"compliance_standards,omitempty"`
-   IsEnabled           bool     `json:"is_enabled,omitempty"`
+   ComplianceStandards []string `json:"compliance_standards"`
+   IsEnabled           bool     `json:"is_enabled"`
}

type ResourceComplianceSecurityProfileWorkspaceSetting struct {
@@ -3,7 +3,7 @@

package schema

type ResourceEnhancedSecurityMonitoringWorkspaceSettingEnhancedSecurityMonitoringWorkspace struct {
-   IsEnabled bool `json:"is_enabled,omitempty"`
+   IsEnabled bool `json:"is_enabled"`
}

type ResourceEnhancedSecurityMonitoringWorkspaceSetting struct {
@@ -97,11 +97,13 @@ type ResourceModelServingConfigServedEntities struct {
type ResourceModelServingConfigServedModels struct {
    EnvironmentVars          map[string]string `json:"environment_vars,omitempty"`
    InstanceProfileArn       string            `json:"instance_profile_arn,omitempty"`
+   MaxProvisionedThroughput int               `json:"max_provisioned_throughput,omitempty"`
+   MinProvisionedThroughput int               `json:"min_provisioned_throughput,omitempty"`
    ModelName                string            `json:"model_name"`
    ModelVersion             string            `json:"model_version"`
    Name                     string            `json:"name,omitempty"`
    ScaleToZeroEnabled       bool              `json:"scale_to_zero_enabled,omitempty"`
-   WorkloadSize             string            `json:"workload_size"`
+   WorkloadSize             string            `json:"workload_size,omitempty"`
    WorkloadType             string            `json:"workload_type,omitempty"`
}
@@ -18,20 +18,27 @@ type ResourceShareObject struct {
    AddedBy                  string `json:"added_by,omitempty"`
    CdfEnabled               bool   `json:"cdf_enabled,omitempty"`
    Comment                  string `json:"comment,omitempty"`
+   Content                  string `json:"content,omitempty"`
    DataObjectType           string `json:"data_object_type"`
    HistoryDataSharingStatus string `json:"history_data_sharing_status,omitempty"`
    Name                     string `json:"name"`
    SharedAs                 string `json:"shared_as,omitempty"`
    StartVersion             int    `json:"start_version,omitempty"`
    Status                   string `json:"status,omitempty"`
+   StringSharedAs           string `json:"string_shared_as,omitempty"`
    Partition                []ResourceShareObjectPartition `json:"partition,omitempty"`
}

type ResourceShare struct {
+   Comment         string `json:"comment,omitempty"`
    CreatedAt       int    `json:"created_at,omitempty"`
    CreatedBy       string `json:"created_by,omitempty"`
    Id              string `json:"id,omitempty"`
    Name            string `json:"name"`
    Owner           string `json:"owner,omitempty"`
+   StorageLocation string `json:"storage_location,omitempty"`
+   StorageRoot     string `json:"storage_root,omitempty"`
+   UpdatedAt       int    `json:"updated_at,omitempty"`
+   UpdatedBy       string `json:"updated_by,omitempty"`
    Object          []ResourceShareObject `json:"object,omitempty"`
}
@@ -15,6 +15,7 @@ type ResourceSqlTable struct {
    ClusterKeys         []string          `json:"cluster_keys,omitempty"`
    Comment             string            `json:"comment,omitempty"`
    DataSourceFormat    string            `json:"data_source_format,omitempty"`
+   EffectiveProperties map[string]string `json:"effective_properties,omitempty"`
    Id                  string            `json:"id,omitempty"`
    Name                string            `json:"name"`
    Options             map[string]string `json:"options,omitempty"`
@@ -21,7 +21,7 @@ type Root struct {

const ProviderHost = "registry.terraform.io"
const ProviderSource = "databricks/databricks"
-const ProviderVersion = "1.50.0"
+const ProviderVersion = "1.52.0"

func NewRoot() *Root {
    return &Root{
@@ -10,6 +10,7 @@ import (
    "github.com/databricks/cli/bundle/config/resources"
    "github.com/databricks/cli/bundle/internal/bundletest"
    "github.com/databricks/cli/internal/testutil"
+   "github.com/databricks/cli/libs/dyn"
    "github.com/databricks/databricks-sdk-go/service/compute"
    "github.com/databricks/databricks-sdk-go/service/jobs"
    "github.com/stretchr/testify/require"

@@ -61,7 +62,7 @@ func TestGlobReferencesExpandedForTaskLibraries(t *testing.T) {
        },
    }

-   bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+   bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

    diags := bundle.Apply(context.Background(), b, ExpandGlobReferences())
    require.Empty(t, diags)

@@ -146,7 +147,7 @@ func TestGlobReferencesExpandedForForeachTaskLibraries(t *testing.T) {
        },
    }

-   bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+   bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

    diags := bundle.Apply(context.Background(), b, ExpandGlobReferences())
    require.Empty(t, diags)

@@ -221,7 +222,7 @@ func TestGlobReferencesExpandedForEnvironmentsDeps(t *testing.T) {
        },
    }

-   bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+   bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

    diags := bundle.Apply(context.Background(), b, ExpandGlobReferences())
    require.Empty(t, diags)
@@ -15,9 +15,10 @@ import (
    "github.com/databricks/cli/bundle/deploy/terraform"
    "github.com/databricks/cli/bundle/libraries"
    "github.com/databricks/cli/bundle/permissions"
-   "github.com/databricks/cli/bundle/python"
    "github.com/databricks/cli/bundle/scripts"
+   "github.com/databricks/cli/bundle/trampoline"
    "github.com/databricks/cli/libs/cmdio"
+   "github.com/databricks/cli/libs/sync"
    terraformlib "github.com/databricks/cli/libs/terraform"
    tfjson "github.com/hashicorp/terraform-json"
)

@@ -128,7 +129,7 @@ properties such as the 'catalog' or 'storage' are changed:`
}

// The deploy phase deploys artifacts and resources.
-func Deploy() bundle.Mutator {
+func Deploy(outputHandler sync.OutputHandler) bundle.Mutator {
    // Core mutators that CRUD resources and modify deployment state. These
    // mutators need informed consent if they are potentially destructive.
    deployCore := bundle.Defer(

@@ -156,8 +157,8 @@ func Deploy() bundle.Mutator {
        artifacts.CleanUp(),
        libraries.ExpandGlobReferences(),
        libraries.Upload(),
-       python.TransformWheelTask(),
+       trampoline.TransformWheelTask(),
-       files.Upload(),
+       files.Upload(outputHandler),
        deploy.StateUpdate(),
        deploy.StatePush(),
        permissions.ApplyWorkspaceRootPermissions(),
@@ -9,8 +9,8 @@ import (
    "github.com/databricks/cli/bundle/deploy/metadata"
    "github.com/databricks/cli/bundle/deploy/terraform"
    "github.com/databricks/cli/bundle/permissions"
-   "github.com/databricks/cli/bundle/python"
    "github.com/databricks/cli/bundle/scripts"
+   "github.com/databricks/cli/bundle/trampoline"
)

// The initialize phase fills in defaults and connects to the workspace.

@@ -39,9 +39,16 @@ func Initialize() bundle.Mutator {
        mutator.MergePipelineClusters(),
        mutator.InitializeWorkspaceClient(),
        mutator.PopulateCurrentUser(),
+
        mutator.DefineDefaultWorkspaceRoot(),
        mutator.ExpandWorkspaceRoot(),
        mutator.DefineDefaultWorkspacePaths(),
+       mutator.PrependWorkspacePrefix(),
+
+       // This mutator needs to be run before variable interpolation because it
+       // searches for strings with variable references in them.
+       mutator.RewriteWorkspacePrefix(),
+
        mutator.SetVariables(),
        // Intentionally placed before ResolveVariableReferencesInLookup, ResolveResourceReferences,
        // ResolveVariableReferencesInComplexVariables and ResolveVariableReferences.

@@ -66,7 +73,7 @@ func Initialize() bundle.Mutator {
        mutator.ConfigureWSFS(),

        mutator.TranslatePaths(),
-       python.WrapperWarning(),
+       trampoline.WrapperWarning(),
        permissions.ApplyBundlePermissions(),
        permissions.FilterCurrentUser(),
        metadata.AnnotateJobs(),
@@ -167,7 +167,7 @@ func renderDiagnosticsOnly(out io.Writer, b *bundle.Bundle, diags diag.Diagnosti

        // Make location relative to bundle root
        if d.Locations[i].File != "" {
-           out, err := filepath.Rel(b.RootPath, d.Locations[i].File)
+           out, err := filepath.Rel(b.BundleRootPath, d.Locations[i].File)
            // if we can't relativize the path, just use path as-is
            if err == nil {
                d.Locations[i].File = out
@@ -59,6 +59,127 @@
      "cli": {
        "bundle": {
          "config": {
+           "resources.Cluster": {
+             "anyOf": [
+               {
+                 "type": "object",
+                 "properties": {
+                   "apply_policy_default_values": {
+                     "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied.",
+                     "$ref": "#/$defs/bool"
+                   },
+                   "autoscale": {
+                     "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
+                     "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.AutoScale"
+                   },
+                   "autotermination_minutes": {
+                     "description": "Automatically terminates the cluster after it is inactive for this time in minutes. If not set,\nthis cluster will not be automatically terminated. If specified, the threshold must be between\n10 and 10000 minutes.\nUsers can also set this value to 0 to explicitly disable automatic termination.",
+                     "$ref": "#/$defs/int"
+                   },
+                   "aws_attributes": {
+                     "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.",
+                     "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.AwsAttributes"
+                   },
+                   "azure_attributes": {
+                     "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.",
+                     "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.AzureAttributes"
+                   },
+                   "cluster_log_conf": {
+                     "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.",
+                     "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.ClusterLogConf"
+                   },
+                   "cluster_name": {
+                     "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n",
+                     "$ref": "#/$defs/string"
+                   },
+                   "custom_tags": {
+                     "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags",
+                     "$ref": "#/$defs/map/string"
+                   },
+                   "data_security_mode": {
+                     "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.DataSecurityMode"
+                   },
+                   "docker_image": {
+                     "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.DockerImage"
+                   },
+                   "driver_instance_pool_id": {
+                     "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned.",
+                     "$ref": "#/$defs/string"
+                   },
+                   "driver_node_type_id": {
+                     "description": "The node type of the Spark driver. Note that this field is optional;\nif unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n",
+                     "$ref": "#/$defs/string"
+                   },
+                   "enable_elastic_disk": {
+                     "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk\nspace when its Spark workers are running low on disk space. This feature requires specific AWS\npermissions to function correctly - refer to the User Guide for more details.",
+                     "$ref": "#/$defs/bool"
+                   },
+                   "enable_local_disk_encryption": {
+                     "description": "Whether to enable LUKS on cluster VMs' local disks",
+                     "$ref": "#/$defs/bool"
+                   },
+                   "gcp_attributes": {
+                     "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.",
+                     "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.GcpAttributes"
+                   },
+                   "init_scripts": {
+                     "description": "The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.",
+                     "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/compute.InitScriptInfo"
+                   },
+                   "instance_pool_id": {
+                     "description": "The optional ID of the instance pool to which the cluster belongs.",
+                     "$ref": "#/$defs/string"
+                   },
+                   "node_type_id": {
+                     "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n",
+                     "$ref": "#/$defs/string"
+                   },
+                   "num_workers": {
+                     "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned.",
+                     "$ref": "#/$defs/int"
+                   },
+                   "permissions": {
+                     "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission"
+                   },
+                   "policy_id": {
+                     "description": "The ID of the cluster policy used to create the cluster if applicable.",
+                     "$ref": "#/$defs/string"
+                   },
+                   "runtime_engine": {
+                     "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.RuntimeEngine"
+                   },
+                   "single_user_name": {
+                     "description": "Single user name if data_security_mode is `SINGLE_USER`",
+                     "$ref": "#/$defs/string"
+                   },
+                   "spark_conf": {
+                     "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nUsers can also pass in a string of extra JVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively.\n",
+                     "$ref": "#/$defs/map/string"
+                   },
+                   "spark_env_vars": {
+                     "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`",
+                     "$ref": "#/$defs/map/string"
+                   },
+                   "spark_version": {
+                     "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`.\nA list of available Spark versions can be retrieved by using\nthe :method:clusters/sparkVersions API call.\n",
+                     "$ref": "#/$defs/string"
+                   },
+                   "ssh_public_keys": {
+                     "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.",
+                     "$ref": "#/$defs/slice/string"
+                   },
+                   "workload_type": {
+                     "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.WorkloadType"
+                   }
+                 },
+                 "additionalProperties": false
+               },
+               {
+                 "type": "string",
+                 "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
+               }
+             ]
+           },
            "resources.Grant": {
              "anyOf": [
                {
@@ -109,7 +230,7 @@
              "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobEmailNotifications"
            },
            "environments": {
-             "description": "A list of task execution environment specifications that can be referenced by tasks of this job.",
+             "description": "A list of task execution environment specifications that can be referenced by serverless tasks of this job.\nAn environment is required to be present for serverless tasks.\nFor serverless notebook tasks, the environment is accessible in the notebook environment panel.\nFor other serverless tasks, the task environment is required to be specified using environment_key in the task settings.",
              "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/jobs.JobEnvironment"
            },
            "format": {
@@ -293,7 +414,7 @@
              "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission"
            },
            "rate_limits": {
-             "description": "Rate limits to be applied to the serving endpoint. NOTE: only external and foundation model endpoints are supported as of now.",
+             "description": "Rate limits to be applied to the serving endpoint. NOTE: this field is deprecated, please use AI Gateway to manage rate limits.",
              "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/serving.RateLimit"
            },
            "route_optimized": {
@@ -747,6 +868,9 @@
          {
            "type": "object",
            "properties": {
+             "cluster_id": {
+               "$ref": "#/$defs/string"
+             },
              "compute_id": {
                "$ref": "#/$defs/string"
              },
@@ -923,6 +1047,9 @@
          {
            "type": "object",
            "properties": {
+             "clusters": {
+               "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Cluster"
+             },
              "experiments": {
                "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.MlflowExperiment"
              },
@@ -990,6 +1117,9 @@
            "bundle": {
              "$ref": "#/$defs/github.com/databricks/cli/bundle/config.Bundle"
            },
+           "cluster_id": {
+             "$ref": "#/$defs/string"
+           },
            "compute_id": {
              "$ref": "#/$defs/string"
            },
@@ -2028,7 +2158,7 @@
      },
      "compute.RuntimeEngine": {
        "type": "string",
-       "description": "Decides which runtime engine to be use, e.g. Standard vs. Photon. If unspecified, the runtime\nengine is inferred from spark_version.",
+       "description": "Determines the cluster's runtime engine, either standard or Photon.\n\nThis field is not compatible with legacy `spark_version` values that contain `-photon-`.\nRemove `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`.\n\nIf left unspecified, the runtime engine defaults to standard unless the spark_version\ncontains -photon-, in which case Photon will be used.\n",
        "enum": [
          "NULL",
          "STANDARD",
@@ -2610,7 +2740,7 @@
      "anyOf": [
        {
          "type": "object",
-         "description": "Write-only setting, available only in Create/Update/Reset and Submit calls. Specifies the user or service principal that the job runs as. If not specified, the job runs as the user who created the job.\n\nOnly `user_name` or `service_principal_name` can be specified. If both are specified, an error is thrown.",
+         "description": "Write-only setting. Specifies the user, service principal or group that the job/pipeline runs as. If not specified, the job/pipeline runs as the user who created the job/pipeline.\n\nExactly one of `user_name`, `service_principal_name`, `group_name` should be specified. If not, an error is thrown.",
          "properties": {
            "service_principal_name": {
              "description": "Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role.",
@@ -4904,6 +5034,20 @@
    "cli": {
      "bundle": {
        "config": {
+         "resources.Cluster": {
+           "anyOf": [
+             {
+               "type": "object",
+               "additionalProperties": {
+                 "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Cluster"
+               }
+             },
+             {
+               "type": "string",
+               "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
+             }
+           ]
+         },
          "resources.Job": {
            "anyOf": [
              {
@@ -30,7 +30,7 @@ func (m *script) Name() string {
}

func (m *script) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
-   executor, err := exec.NewCommandExecutor(b.RootPath)
+   executor, err := exec.NewCommandExecutor(b.BundleRootPath)
    if err != nil {
        return diag.FromErr(err)
    }
Some files were not shown because too many files have changed in this diff.