mirror of https://github.com/databricks/cli.git
Merge remote-tracking branch 'origin' into feature/uc-volumes
commit e43f566579
@@ -1 +1 @@
-d05898328669a3f8ab0c2ecee37db2673d3ea3f7
+0c86ea6dbd9a730c24ff0d4e509603e476955ac5
@@ -5,6 +5,7 @@ package {{(.TrimPrefix "account").SnakeName}}

 import (
+	"github.com/databricks/cli/libs/cmdio"
 	"github.com/databricks/cli/libs/flags"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/cmd/root"
 	"github.com/databricks/databricks-sdk-go/service/{{.Package.Name}}"
 	"github.com/spf13/cobra"
@@ -231,10 +232,16 @@ func new{{.PascalName}}() *cobra.Command {
 	{{- if .Request }}
 	{{ if .CanUseJson }}
 		if cmd.Flags().Changed("json") {
-			err = {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req)
+			diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
+			}
 		}{{end}}{{ if .MustUseJson }}else {
 			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
 		}{{- end}}
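The template change above moves generated commands from a single error to a diagnostics collection: hard errors abort the command, while non-error diagnostics produced during JSON unmarshalling are rendered to stderr and execution continues. A minimal, self-contained sketch of that control flow, where `Diagnostics` and `renderToStderr` are illustrative stand-ins for the CLI's actual `diag` and `cmdio` packages:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// Severity and Diagnostics are stand-ins for the CLI's diag package.
type Severity int

const (
	SeverityWarning Severity = iota
	SeverityError
)

type Diagnostic struct {
	Severity Severity
	Summary  string
}

type Diagnostics []Diagnostic

// HasError reports whether any diagnostic is error-severity.
func (ds Diagnostics) HasError() bool {
	for _, d := range ds {
		if d.Severity == SeverityError {
			return true
		}
	}
	return false
}

// Error returns the first error-severity diagnostic as a Go error.
func (ds Diagnostics) Error() error {
	for _, d := range ds {
		if d.Severity == SeverityError {
			return errors.New(d.Summary)
		}
	}
	return nil
}

// renderToStderr plays the role of cmdio.RenderDiagnosticsToErrorOut.
func renderToStderr(ds Diagnostics) error {
	for _, d := range ds {
		if _, err := fmt.Fprintf(os.Stderr, "warning: %s\n", d.Summary); err != nil {
			return err
		}
	}
	return nil
}

func run(diags Diagnostics) error {
	// Abort on hard errors, exactly as the generated command does.
	if diags.HasError() {
		return diags.Error()
	}
	// Render any remaining (warning-level) diagnostics, then continue.
	if len(diags) > 0 {
		if err := renderToStderr(diags); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	diags := Diagnostics{{Severity: SeverityWarning, Summary: "field is deprecated"}}
	if err := run(diags); err != nil {
		fmt.Println("failed:", err)
	}
}
```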
@@ -6,6 +6,7 @@ cmd/account/cmd.go linguist-generated=true
 cmd/account/credentials/credentials.go linguist-generated=true
 cmd/account/csp-enablement-account/csp-enablement-account.go linguist-generated=true
 cmd/account/custom-app-integration/custom-app-integration.go linguist-generated=true
+cmd/account/disable-legacy-features/disable-legacy-features.go linguist-generated=true
 cmd/account/encryption-keys/encryption-keys.go linguist-generated=true
 cmd/account/esm-enablement-account/esm-enablement-account.go linguist-generated=true
 cmd/account/groups/groups.go linguist-generated=true
@@ -52,6 +53,7 @@ cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true
 cmd/workspace/dashboards/dashboards.go linguist-generated=true
 cmd/workspace/data-sources/data-sources.go linguist-generated=true
 cmd/workspace/default-namespace/default-namespace.go linguist-generated=true
+cmd/workspace/disable-legacy-access/disable-legacy-access.go linguist-generated=true
 cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go linguist-generated=true
 cmd/workspace/experiments/experiments.go linguist-generated=true
 cmd/workspace/external-locations/external-locations.go linguist-generated=true
@@ -108,6 +110,7 @@ cmd/workspace/storage-credentials/storage-credentials.go linguist-generated=true
 cmd/workspace/system-schemas/system-schemas.go linguist-generated=true
 cmd/workspace/table-constraints/table-constraints.go linguist-generated=true
 cmd/workspace/tables/tables.go linguist-generated=true
+cmd/workspace/temporary-table-credentials/temporary-table-credentials.go linguist-generated=true
 cmd/workspace/token-management/token-management.go linguist-generated=true
 cmd/workspace/tokens/tokens.go linguist-generated=true
 cmd/workspace/users/users.go linguist-generated=true
@@ -33,7 +33,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.x
+          go-version: 1.22.7

       - name: Setup Python
         uses: actions/setup-python@v5
@@ -68,7 +68,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.x
+          go-version: 1.22.7
          # No need to download cached dependencies when running gofmt.
          cache: false
@@ -100,7 +100,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.x
+          go-version: 1.22.7

      # Github repo: https://github.com/ajv-validator/ajv-cli
      - name: Install ajv-cli
@@ -21,7 +21,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.x
+          go-version: 1.22.7

          # The default cache key for this action considers only the `go.sum` file.
          # We include .goreleaser.yaml here to differentiate from the cache used by the push action
@@ -22,7 +22,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.x
+          go-version: 1.22.7

          # The default cache key for this action considers only the `go.sum` file.
          # We include .goreleaser.yaml here to differentiate from the cache used by the push action
CHANGELOG.md
@@ -1,5 +1,100 @@
 # Version changelog

+## [Release] Release v0.230.0
+
+Notable changes for Databricks Asset Bundles:
+
+Workspace paths are automatically prefixed with `/Workspace`. In addition, all usage of path strings such as `/Workspace/${workspace.root_path}/...` in bundle configuration is automatically replaced with `${workspace.root_path}/...` and generates a warning as part of bundle validate.
+
+More details can be found here: https://docs.databricks.com/en/release-notes/dev-tools/bundles.html#workspace-paths
+
+Bundles:
+* Add an error if state files grow bigger than the export limit ([#1795](https://github.com/databricks/cli/pull/1795)).
+* Always prepend bundle remote paths with /Workspace ([#1724](https://github.com/databricks/cli/pull/1724)).
+* Add resource path field to bundle workspace configuration ([#1800](https://github.com/databricks/cli/pull/1800)).
+* Add validation for files with a `.(resource-name).yml` extension ([#1780](https://github.com/databricks/cli/pull/1780)).
+
+Internal:
+* Remove deprecated or readonly fields from the bundle schema ([#1809](https://github.com/databricks/cli/pull/1809)).
+
+API Changes:
+* Changed `databricks git-credentials create`, `databricks git-credentials delete`, `databricks git-credentials get`, `databricks git-credentials list`, `databricks git-credentials update` commands.
+* Changed `databricks repos create`, `databricks repos delete`, `databricks repos get`, `databricks repos update` commands.
+
+OpenAPI commit 0c86ea6dbd9a730c24ff0d4e509603e476955ac5 (2024-10-02)
+Dependency updates:
+* Upgrade TF provider to 1.53.0 ([#1815](https://github.com/databricks/cli/pull/1815)).
+* Bump golang.org/x/term from 0.24.0 to 0.25.0 ([#1811](https://github.com/databricks/cli/pull/1811)).
+* Bump golang.org/x/text from 0.18.0 to 0.19.0 ([#1812](https://github.com/databricks/cli/pull/1812)).
+* Bump github.com/databricks/databricks-sdk-go from 0.47.0 to 0.48.0 ([#1810](https://github.com/databricks/cli/pull/1810)).
+
+## [Release] Release v0.229.0
+
+Bundles:
+* Added support for creating all-purpose clusters ([#1698](https://github.com/databricks/cli/pull/1698)).
+* Reduce time until the prompt is shown for bundle run ([#1727](https://github.com/databricks/cli/pull/1727)).
+* Use Unity Catalog for pipelines in the default-python template ([#1766](https://github.com/databricks/cli/pull/1766)).
+* Add verbose flag to the "bundle deploy" command ([#1774](https://github.com/databricks/cli/pull/1774)).
+* Fixed full variable override detection ([#1787](https://github.com/databricks/cli/pull/1787)).
+* Add sub-extension to resource files in built-in templates ([#1777](https://github.com/databricks/cli/pull/1777)).
+* Fix panic in `apply_presets.go` ([#1796](https://github.com/databricks/cli/pull/1796)).
+
+Internal:
+* Assert tokens are redacted in origin URL when username is not specified ([#1785](https://github.com/databricks/cli/pull/1785)).
+* Refactor jobs path translation ([#1782](https://github.com/databricks/cli/pull/1782)).
+* Add JobTaskClusterSpec validate mutator ([#1784](https://github.com/databricks/cli/pull/1784)).
+* Pin Go toolchain to 1.22.7 ([#1790](https://github.com/databricks/cli/pull/1790)).
+* Modify SetLocation test utility to take full locations as argument ([#1788](https://github.com/databricks/cli/pull/1788)).
+* Simplified isFullVariableOverrideDef implementation ([#1791](https://github.com/databricks/cli/pull/1791)).
+* Sort tasks by `task_key` before generating the Terraform configuration ([#1776](https://github.com/databricks/cli/pull/1776)).
+* Trim trailing whitespace ([#1794](https://github.com/databricks/cli/pull/1794)).
+* Move trampoline code into trampoline package ([#1793](https://github.com/databricks/cli/pull/1793)).
+* Rename `RootPath` -> `BundleRootPath` ([#1792](https://github.com/databricks/cli/pull/1792)).
+
+API Changes:
+* Changed `databricks apps delete` command to return .
+* Changed `databricks apps deploy` command with new required argument order.
+* Changed `databricks apps start` command to return .
+* Changed `databricks apps stop` command to return .
+* Added `databricks temporary-table-credentials` command group.
+* Added `databricks serving-endpoints put-ai-gateway` command.
+* Added `databricks disable-legacy-access` command group.
+* Added `databricks account disable-legacy-features` command group.
+
+OpenAPI commit 6f6b1371e640f2dfeba72d365ac566368656f6b6 (2024-09-19)
+Dependency updates:
+* Upgrade to Go SDK 0.47.0 ([#1799](https://github.com/databricks/cli/pull/1799)).
+* Upgrade to TF provider 1.52 ([#1781](https://github.com/databricks/cli/pull/1781)).
+* Bump golang.org/x/mod from 0.20.0 to 0.21.0 ([#1758](https://github.com/databricks/cli/pull/1758)).
+* Bump github.com/hashicorp/hc-install from 0.7.0 to 0.9.0 ([#1772](https://github.com/databricks/cli/pull/1772)).
+
+## [Release] Release v0.228.1
+
+Bundles:
+* Added listing cluster filtering for cluster lookups ([#1754](https://github.com/databricks/cli/pull/1754)).
+* Expand library globs relative to the sync root ([#1756](https://github.com/databricks/cli/pull/1756)).
+* Fixed generated YAML missing 'default' for empty values ([#1765](https://github.com/databricks/cli/pull/1765)).
+* Use periodic triggers in all templates ([#1739](https://github.com/databricks/cli/pull/1739)).
+* Use the friendly name of service principals when shortening their name ([#1770](https://github.com/databricks/cli/pull/1770)).
+* Fixed detecting full syntax variable override which includes type field ([#1775](https://github.com/databricks/cli/pull/1775)).
+
+Internal:
+* Pass copy of `dyn.Path` to callback function ([#1747](https://github.com/databricks/cli/pull/1747)).
+* Make bundle JSON schema modular with `` ([#1700](https://github.com/databricks/cli/pull/1700)).
+* Alias variables block in the `Target` struct ([#1748](https://github.com/databricks/cli/pull/1748)).
+* Add end to end integration tests for bundle JSON schema ([#1726](https://github.com/databricks/cli/pull/1726)).
+* Fix artifact upload integration tests ([#1767](https://github.com/databricks/cli/pull/1767)).
+
+API Changes:
+* Added `databricks quality-monitors regenerate-dashboard` command.
+
+OpenAPI commit d05898328669a3f8ab0c2ecee37db2673d3ea3f7 (2024-09-04)
+Dependency updates:
+* Bump golang.org/x/term from 0.23.0 to 0.24.0 ([#1757](https://github.com/databricks/cli/pull/1757)).
+* Bump golang.org/x/oauth2 from 0.22.0 to 0.23.0 ([#1761](https://github.com/databricks/cli/pull/1761)).
+* Bump golang.org/x/text from 0.17.0 to 0.18.0 ([#1759](https://github.com/databricks/cli/pull/1759)).
+* Bump github.com/databricks/databricks-sdk-go from 0.45.0 to 0.46.0 ([#1760](https://github.com/databricks/cli/pull/1760)).
+
 ## [Release] Release v0.228.0

 CLI:
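The workspace-path change called out in the v0.230.0 notes is mechanical enough to sketch: paths that duplicate the `/Workspace` prefix in front of a variable reference are rewritten to the variable form with a warning, and bare workspace paths gain the prefix. A minimal illustration, assuming the substitution works on plain strings (the real implementation operates on the bundle's dynamic configuration tree):

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeWorkspacePath mirrors the two rewrites described in the release
// notes: strip a literal "/Workspace" prefix that duplicates a variable
// reference, and prefix bare workspace paths with "/Workspace".
// It returns the rewritten path plus a warning when a rewrite happened.
func normalizeWorkspacePath(p string) (string, string) {
	const prefix = "/Workspace"
	if strings.HasPrefix(p, prefix+"/${workspace.") {
		fixed := strings.TrimPrefix(p, prefix)
		return fixed, fmt.Sprintf("%s is redundant; using %s", p, fixed)
	}
	if !strings.HasPrefix(p, prefix+"/") {
		return prefix + p, ""
	}
	return p, ""
}

func main() {
	for _, p := range []string{
		"/Workspace/${workspace.root_path}/files",
		"/Users/someone@example.com/.bundle/app",
	} {
		fixed, warn := normalizeWorkspacePath(p)
		fmt.Println(fixed)
		if warn != "" {
			fmt.Println("warning:", warn)
		}
	}
}
```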
@@ -10,6 +10,7 @@ import (
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/internal/bundletest"
 	"github.com/databricks/cli/internal/testutil"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -23,7 +24,7 @@ func TestExpandGlobs_Nominal(t *testing.T) {
 	testutil.Touch(t, tmpDir, "bc.txt")

 	b := &bundle.Bundle{
-		RootPath: tmpDir,
+		BundleRootPath: tmpDir,
 		Config: config.Root{
 			Artifacts: config.Artifacts{
 				"test": {
@@ -36,7 +37,7 @@ func TestExpandGlobs_Nominal(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "artifacts", filepath.Join(tmpDir, "databricks.yml"))
+	bundletest.SetLocation(b, "artifacts", []dyn.Location{{File: filepath.Join(tmpDir, "databricks.yml")}})

 	ctx := context.Background()
 	diags := bundle.Apply(ctx, b, bundle.Seq(
@@ -62,7 +63,7 @@ func TestExpandGlobs_InvalidPattern(t *testing.T) {
 	tmpDir := t.TempDir()

 	b := &bundle.Bundle{
-		RootPath: tmpDir,
+		BundleRootPath: tmpDir,
 		Config: config.Root{
 			Artifacts: config.Artifacts{
 				"test": {
@@ -77,7 +78,7 @@ func TestExpandGlobs_InvalidPattern(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "artifacts", filepath.Join(tmpDir, "databricks.yml"))
+	bundletest.SetLocation(b, "artifacts", []dyn.Location{{File: filepath.Join(tmpDir, "databricks.yml")}})

 	ctx := context.Background()
 	diags := bundle.Apply(ctx, b, bundle.Seq(
@@ -110,7 +111,7 @@ func TestExpandGlobs_NoMatches(t *testing.T) {
 	testutil.Touch(t, tmpDir, "b2.txt")

 	b := &bundle.Bundle{
-		RootPath: tmpDir,
+		BundleRootPath: tmpDir,
 		Config: config.Root{
 			Artifacts: config.Artifacts{
 				"test": {
@@ -125,7 +126,7 @@ func TestExpandGlobs_NoMatches(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "artifacts", filepath.Join(tmpDir, "databricks.yml"))
+	bundletest.SetLocation(b, "artifacts", []dyn.Location{{File: filepath.Join(tmpDir, "databricks.yml")}})

 	ctx := context.Background()
 	diags := bundle.Apply(ctx, b, bundle.Seq(
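The test updates above track a change to the `bundletest.SetLocation` helper: it now takes a slice of `dyn.Location` values (file plus optional line and column) instead of a bare filename, matching what production diagnostics carry. A rough sketch of the shape of that change, with a simplified stand-in type:

```go
package main

import "fmt"

// Location is a stand-in for dyn.Location: a file position, not just a file.
type Location struct {
	File   string
	Line   int
	Column int
}

// Old-style helper: only a filename could be attached to a config path.
func setLocationOld(locs map[string][]Location, path, file string) {
	locs[path] = []Location{{File: file}}
}

// New-style helper: callers pass full locations, so tests can assert on
// line/column information the same way production diagnostics do.
func setLocation(locs map[string][]Location, path string, ls []Location) {
	locs[path] = ls
}

func main() {
	locs := map[string][]Location{}
	setLocation(locs, "artifacts", []Location{{File: "databricks.yml", Line: 4, Column: 7}})
	fmt.Println(locs["artifacts"])
}
```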
@@ -47,7 +47,7 @@ func (m *prepare) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics

 	// If artifact path is not provided, use bundle root dir
 	if artifact.Path == "" {
-		artifact.Path = b.RootPath
+		artifact.Path = b.BundleRootPath
 	}

 	if !filepath.IsAbs(artifact.Path) {
@@ -35,21 +35,21 @@ func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
 	log.Infof(ctx, "Detecting Python wheel project...")

 	// checking if there is setup.py in the bundle root
-	setupPy := filepath.Join(b.RootPath, "setup.py")
+	setupPy := filepath.Join(b.BundleRootPath, "setup.py")
 	_, err := os.Stat(setupPy)
 	if err != nil {
 		log.Infof(ctx, "No Python wheel project found at bundle root folder")
 		return nil
 	}

-	log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.RootPath))
+	log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.BundleRootPath))
 	module := extractModuleName(setupPy)

 	if b.Config.Artifacts == nil {
 		b.Config.Artifacts = make(map[string]*config.Artifact)
 	}

-	pkgPath, err := filepath.Abs(b.RootPath)
+	pkgPath, err := filepath.Abs(b.BundleRootPath)
 	if err != nil {
 		return diag.FromErr(err)
 	}
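For context, `extractModuleName` (referenced above but not shown in this diff) derives the wheel's module name from `setup.py`. A naive, hypothetical version of such a helper might regex the `name=` argument out of the `setup()` call; the real implementation may differ:

```go
package main

import (
	"fmt"
	"os"
	"regexp"
)

// namePattern matches name="..." or name='...' inside setup.py.
// This is a deliberately simple heuristic for illustration only.
var namePattern = regexp.MustCompile(`name\s*=\s*["']([^"']+)["']`)

// extractModuleName returns the package name declared in setup.py,
// or an empty string when none is found.
func extractModuleName(setupPy string) string {
	data, err := os.ReadFile(setupPy)
	if err != nil {
		return ""
	}
	m := namePattern.FindSubmatch(data)
	if m == nil {
		return ""
	}
	return string(m[1])
}

func main() {
	fmt.Println(extractModuleName("setup.py"))
}
```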
@@ -31,22 +31,26 @@
 const internalFolder = ".internal"

 type Bundle struct {
-	// RootPath contains the directory path to the root of the bundle.
+	// BundleRootPath is the local path to the root directory of the bundle.
 	// It is set when we instantiate a new bundle instance.
-	RootPath string
+	BundleRootPath string

-	// BundleRoot is a virtual filesystem path to the root of the bundle.
+	// BundleRoot is a virtual filesystem path to [BundleRootPath].
+	// Exclusively use this field for filesystem operations.
 	BundleRoot vfs.Path

-	// SyncRoot is a virtual filesystem path to the root directory of the files that are synchronized to the workspace.
-	// It can be an ancestor to [BundleRoot], but not a descendant; that is, [SyncRoot] must contain [BundleRoot].
-	SyncRoot vfs.Path
-
+	// SyncRootPath is the local path to the root directory of files that are synchronized to the workspace.
+	// It is equal to `SyncRoot.Native()` and included as dedicated field for convenient access.
+	// By default, it is the same as [BundleRootPath].
+	// If it is different, it must be an ancestor to [BundleRootPath].
+	// That is, [SyncRootPath] must contain [BundleRootPath].
+	SyncRootPath string
+
+	// SyncRoot is a virtual filesystem path to [SyncRootPath].
+	// Exclusively use this field for filesystem operations.
+	SyncRoot vfs.Path
+
 	// Config contains the bundle configuration.
 	// It is loaded from the bundle configuration files and mutators may update it.
 	Config config.Root

 	// Metadata about the bundle deployment. This is the interface Databricks services
@@ -84,14 +88,14 @@ type Bundle struct {

 func Load(ctx context.Context, path string) (*Bundle, error) {
 	b := &Bundle{
-		RootPath:   filepath.Clean(path),
+		BundleRootPath: filepath.Clean(path),
 		BundleRoot: vfs.MustNew(path),
 	}
 	configFile, err := config.FileNames.FindInPath(path)
 	if err != nil {
 		return nil, err
 	}
-	log.Debugf(ctx, "Found bundle root at %s (file %s)", b.RootPath, configFile)
+	log.Debugf(ctx, "Found bundle root at %s (file %s)", b.BundleRootPath, configFile)
 	return b, nil
 }

@@ -160,7 +164,7 @@ func (b *Bundle) CacheDir(ctx context.Context, paths ...string) (string, error)
 	if !exists || cacheDirName == "" {
 		cacheDirName = filepath.Join(
 			// Anchor at bundle root directory.
-			b.RootPath,
+			b.BundleRootPath,
 			// Static cache directory.
 			".databricks",
 			"bundle",
@@ -212,7 +216,7 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) {
 	if err != nil {
 		return nil, err
 	}
-	internalDirRel, err := filepath.Rel(b.RootPath, internalDir)
+	internalDirRel, err := filepath.Rel(b.BundleRootPath, internalDir)
 	if err != nil {
 		return nil, err
 	}
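The new doc comments pin down the invariant between the two roots: the sync root must contain the bundle root, never the other way around. A small, self-contained sketch of how such an invariant can be checked on plain paths (illustrative only; the bundle uses its vfs abstraction rather than raw strings):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// contains reports whether ancestor contains (or equals) child,
// mirroring the documented rule: SyncRootPath must contain BundleRootPath.
func contains(ancestor, child string) bool {
	rel, err := filepath.Rel(ancestor, child)
	if err != nil {
		return false
	}
	return rel == "." || (!strings.HasPrefix(rel, "..") && !filepath.IsAbs(rel))
}

func main() {
	syncRoot := "/repo"
	bundleRoot := "/repo/bundles/app"
	fmt.Println(contains(syncRoot, bundleRoot)) // true: valid configuration
	fmt.Println(contains(bundleRoot, syncRoot)) // false: sync root may not be a descendant
}
```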
@@ -21,7 +21,7 @@ func (r ReadOnlyBundle) Config() config.Root {
 }

 func (r ReadOnlyBundle) RootPath() string {
-	return r.b.RootPath
+	return r.b.BundleRootPath
 }

 func (r ReadOnlyBundle) BundleRoot() vfs.Path {
@@ -79,7 +79,7 @@ func TestBundleMustLoadSuccess(t *testing.T) {
 	t.Setenv(env.RootVariable, "./tests/basic")
 	b, err := MustLoad(context.Background())
 	require.NoError(t, err)
-	assert.Equal(t, "tests/basic", filepath.ToSlash(b.RootPath))
+	assert.Equal(t, "tests/basic", filepath.ToSlash(b.BundleRootPath))
 }

 func TestBundleMustLoadFailureWithEnv(t *testing.T) {
@@ -98,7 +98,7 @@ func TestBundleTryLoadSuccess(t *testing.T) {
 	t.Setenv(env.RootVariable, "./tests/basic")
 	b, err := TryLoad(context.Background())
 	require.NoError(t, err)
-	assert.Equal(t, "tests/basic", filepath.ToSlash(b.RootPath))
+	assert.Equal(t, "tests/basic", filepath.ToSlash(b.BundleRootPath))
 }

 func TestBundleTryLoadFailureWithEnv(t *testing.T) {
@@ -38,8 +38,11 @@ type Bundle struct {
 	// Annotated readonly as this should be set at the target level.
 	Mode Mode `json:"mode,omitempty" bundle:"readonly"`

-	// Overrides the compute used for jobs and other supported assets.
-	ComputeID string `json:"compute_id,omitempty"`
+	// DEPRECATED: Overrides the compute used for jobs and other supported assets.
+	ComputeId string `json:"compute_id,omitempty"`
+
+	// Overrides the cluster used for jobs and other supported assets.
+	ClusterId string `json:"cluster_id,omitempty"`

 	// Deployment section specifies deployment related configuration for bundle
 	Deployment Deployment `json:"deployment,omitempty"`
@@ -20,7 +20,7 @@ func (m *entryPoint) Name() string {
 }

 func (m *entryPoint) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
-	path, err := config.FileNames.FindInPath(b.RootPath)
+	path, err := config.FileNames.FindInPath(b.BundleRootPath)
 	if err != nil {
 		return diag.FromErr(err)
 	}

@@ -18,7 +18,7 @@ func TestEntryPointNoRootPath(t *testing.T) {

 func TestEntryPoint(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: "testdata",
+		BundleRootPath: "testdata/basic",
 	}
 	diags := bundle.Apply(context.Background(), b, loader.EntryPoint())
 	require.NoError(t, diags.Error())
@@ -3,12 +3,135 @@ package loader

 import (
 	"context"
 	"fmt"
+	"slices"
+	"sort"
+	"strings"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
 )

+func validateFileFormat(configRoot dyn.Value, filePath string) diag.Diagnostics {
+	for _, resourceDescription := range config.SupportedResources() {
+		singularName := resourceDescription.SingularName
+
+		for _, yamlExt := range []string{"yml", "yaml"} {
+			ext := fmt.Sprintf(".%s.%s", singularName, yamlExt)
+			if strings.HasSuffix(filePath, ext) {
+				return validateSingleResourceDefined(configRoot, ext, singularName)
+			}
+		}
+	}
+
+	return nil
+}
+
+func validateSingleResourceDefined(configRoot dyn.Value, ext, typ string) diag.Diagnostics {
+	type resource struct {
+		path  dyn.Path
+		value dyn.Value
+		typ   string
+		key   string
+	}
+
+	resources := []resource{}
+	supportedResources := config.SupportedResources()
+
+	// Gather all resources defined in the resources block.
+	_, err := dyn.MapByPattern(
+		configRoot,
+		dyn.NewPattern(dyn.Key("resources"), dyn.AnyKey(), dyn.AnyKey()),
+		func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+			// The key for the resource, e.g. "my_job" for jobs.my_job.
+			k := p[2].Key()
+			// The type of the resource, e.g. "job" for jobs.my_job.
+			typ := supportedResources[p[1].Key()].SingularName
+
+			resources = append(resources, resource{path: p, value: v, typ: typ, key: k})
+			return v, nil
+		})
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	// Gather all resources defined in a target block.
+	_, err = dyn.MapByPattern(
+		configRoot,
+		dyn.NewPattern(dyn.Key("targets"), dyn.AnyKey(), dyn.Key("resources"), dyn.AnyKey(), dyn.AnyKey()),
+		func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+			// The key for the resource, e.g. "my_job" for jobs.my_job.
+			k := p[4].Key()
+			// The type of the resource, e.g. "job" for jobs.my_job.
+			typ := supportedResources[p[3].Key()].SingularName
+
+			resources = append(resources, resource{path: p, value: v, typ: typ, key: k})
+			return v, nil
+		})
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	typeMatch := true
+	seenKeys := map[string]struct{}{}
+	for _, rr := range resources {
+		// case: The resource is not of the correct type.
+		if rr.typ != typ {
+			typeMatch = false
+			break
+		}
+
+		seenKeys[rr.key] = struct{}{}
+	}
+
+	// Format matches. There's at most one resource defined in the file.
+	// The resource is also of the correct type.
+	if typeMatch && len(seenKeys) <= 1 {
+		return nil
+	}
+
+	detail := strings.Builder{}
+	detail.WriteString("The following resources are defined or configured in this file:\n")
+	lines := []string{}
+	for _, r := range resources {
+		lines = append(lines, fmt.Sprintf(" - %s (%s)\n", r.key, r.typ))
+	}
+	// Sort the lines to print to make the output deterministic.
+	sort.Strings(lines)
+	// Compact the lines before writing them to the message to remove any duplicate lines.
+	// This is needed because we do not dedup earlier when gathering the resources
+	// and it's valid to define the same resource in both the resources and targets block.
+	lines = slices.Compact(lines)
+	for _, l := range lines {
+		detail.WriteString(l)
+	}
+
+	locations := []dyn.Location{}
+	paths := []dyn.Path{}
+	for _, rr := range resources {
+		locations = append(locations, rr.value.Locations()...)
+		paths = append(paths, rr.path)
+	}
+	// Sort the locations and paths to make the output deterministic.
+	sort.Slice(locations, func(i, j int) bool {
+		return locations[i].String() < locations[j].String()
+	})
+	sort.Slice(paths, func(i, j int) bool {
+		return paths[i].String() < paths[j].String()
+	})
+
+	return diag.Diagnostics{
+		{
+			Severity:  diag.Recommendation,
+			Summary:   fmt.Sprintf("define a single %s in a file with the %s extension.", strings.ReplaceAll(typ, "_", " "), ext),
+			Detail:    detail.String(),
+			Locations: locations,
+			Paths:     paths,
+		},
+	}
+}
+
 type processInclude struct {
 	fullPath string
 	relPath  string
@@ -31,6 +154,13 @@ func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
 	if diags.HasError() {
 		return diags
 	}
+
+	// Add any diagnostics associated with the file format.
+	diags = append(diags, validateFileFormat(this.Value(), m.relPath)...)
+	if diags.HasError() {
+		return diags
+	}
+
 	err := b.Config.Merge(this)
 	if err != nil {
 		diags = diags.Extend(diag.FromErr(err))
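The recommendation above fires when a `.job.yml`-style file defines more than one resource, or a resource of a different type than its sub-extension advertises. A condensed, self-contained sketch of that rule over a plain map (the real check walks the dynamic configuration tree with `dyn.MapByPattern`, and the pluralization here is a naive illustrative assumption):

```go
package main

import "fmt"

// checkResourceFile reports a recommendation when a file named with a
// .<singular>.yml sub-extension defines anything other than a single
// resource of that type. resources maps plural type name -> resource keys.
func checkResourceFile(singular string, resources map[string][]string) string {
	keys := map[string]bool{}
	typeMatch := true
	for typ, names := range resources {
		// Naively compare against the singular form, e.g. "jobs" -> "job".
		if typ[:len(typ)-1] != singular {
			typeMatch = false
		}
		for _, n := range names {
			keys[n] = true
		}
	}
	if typeMatch && len(keys) <= 1 {
		return ""
	}
	return fmt.Sprintf("define a single %s in a file with the .%s.yml extension.", singular, singular)
}

func main() {
	// Two jobs in one .job.yml file: triggers the recommendation.
	fmt.Println(checkResourceFile("job", map[string][]string{
		"jobs": {"job1", "job2"},
	}))
	// One job: fine, prints an empty line.
	fmt.Println(checkResourceFile("job", map[string][]string{
		"jobs": {"job1"},
	}))
}
```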
@@ -8,13 +8,15 @@ import (
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/loader"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )

 func TestProcessInclude(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: "testdata",
+		BundleRootPath: "testdata/basic",
 		Config: config.Root{
 			Workspace: config.Workspace{
 				Host: "foo",
@@ -22,7 +24,7 @@ func TestProcessInclude(t *testing.T) {
 		},
 	}

-	m := loader.ProcessInclude(filepath.Join(b.RootPath, "host.yml"), "host.yml")
+	m := loader.ProcessInclude(filepath.Join(b.BundleRootPath, "host.yml"), "host.yml")
 	assert.Equal(t, "ProcessInclude(host.yml)", m.Name())

 	// Assert the host value prior to applying the mutator
@@ -33,3 +35,184 @@ func TestProcessInclude(t *testing.T) {
 	require.NoError(t, diags.Error())
 	assert.Equal(t, "bar", b.Config.Workspace.Host)
 }
+
+func TestProcessIncludeFormatMatch(t *testing.T) {
+	for _, fileName := range []string{
+		"one_job.job.yml",
+		"one_pipeline.pipeline.yaml",
+		"two_job.yml",
+		"job_and_pipeline.yml",
+		"multiple_resources.yml",
+	} {
+		t.Run(fileName, func(t *testing.T) {
+			b := &bundle.Bundle{
+				BundleRootPath: "testdata/format_match",
+				Config: config.Root{
+					Bundle: config.Bundle{
+						Name: "format_test",
+					},
+				},
+			}
+
+			m := loader.ProcessInclude(filepath.Join(b.BundleRootPath, fileName), fileName)
+			diags := bundle.Apply(context.Background(), b, m)
+			assert.Empty(t, diags)
+		})
+	}
+}
+
+func TestProcessIncludeFormatNotMatch(t *testing.T) {
+	for fileName, expectedDiags := range map[string]diag.Diagnostics{
+		"single_job.pipeline.yaml": {
+			{
+				Severity: diag.Recommendation,
+				Summary:  "define a single pipeline in a file with the .pipeline.yaml extension.",
+				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n",
+				Locations: []dyn.Location{
+					{File: filepath.FromSlash("testdata/format_not_match/single_job.pipeline.yaml"), Line: 11, Column: 11},
+					{File: filepath.FromSlash("testdata/format_not_match/single_job.pipeline.yaml"), Line: 4, Column: 7},
+				},
+				Paths: []dyn.Path{
+					dyn.MustPathFromString("resources.jobs.job1"),
+					dyn.MustPathFromString("targets.target1.resources.jobs.job1"),
+				},
+			},
+		},
+		"job_and_pipeline.job.yml": {
+			{
+				Severity: diag.Recommendation,
+				Summary:  "define a single job in a file with the .job.yml extension.",
+				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n - pipeline1 (pipeline)\n",
+				Locations: []dyn.Location{
+					{File: filepath.FromSlash("testdata/format_not_match/job_and_pipeline.job.yml"), Line: 11, Column: 11},
+					{File: filepath.FromSlash("testdata/format_not_match/job_and_pipeline.job.yml"), Line: 4, Column: 7},
+				},
+				Paths: []dyn.Path{
+					dyn.MustPathFromString("resources.pipelines.pipeline1"),
+					dyn.MustPathFromString("targets.target1.resources.jobs.job1"),
+				},
+			},
+		},
+		"job_and_pipeline.experiment.yml": {
+			{
+				Severity: diag.Recommendation,
+				Summary:  "define a single experiment in a file with the .experiment.yml extension.",
+				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n - pipeline1 (pipeline)\n",
+				Locations: []dyn.Location{
+					{File: filepath.FromSlash("testdata/format_not_match/job_and_pipeline.experiment.yml"), Line: 11, Column: 11},
+					{File: filepath.FromSlash("testdata/format_not_match/job_and_pipeline.experiment.yml"), Line: 4, Column: 7},
+				},
+				Paths: []dyn.Path{
+					dyn.MustPathFromString("resources.pipelines.pipeline1"),
+					dyn.MustPathFromString("targets.target1.resources.jobs.job1"),
+				},
+			},
+		},
+		"two_jobs.job.yml": {
+			{
+				Severity: diag.Recommendation,
+				Summary:  "define a single job in a file with the .job.yml extension.",
+				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n - job2 (job)\n",
+				Locations: []dyn.Location{
+					{File: filepath.FromSlash("testdata/format_not_match/two_jobs.job.yml"), Line: 4, Column: 7},
+					{File: filepath.FromSlash("testdata/format_not_match/two_jobs.job.yml"), Line: 7, Column: 7},
+				},
+				Paths: []dyn.Path{
+					dyn.MustPathFromString("resources.jobs.job1"),
+					dyn.MustPathFromString("resources.jobs.job2"),
+				},
+			},
+		},
+		"second_job_in_target.job.yml": {
+			{
+				Severity: diag.Recommendation,
+				Summary:  "define a single job in a file with the .job.yml extension.",
+				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n - job2 (job)\n",
+				Locations: []dyn.Location{
+					{File: filepath.FromSlash("testdata/format_not_match/second_job_in_target.job.yml"), Line: 11, Column: 11},
+					{File: filepath.FromSlash("testdata/format_not_match/second_job_in_target.job.yml"), Line: 4, Column: 7},
+				},
+				Paths: []dyn.Path{
+					dyn.MustPathFromString("resources.jobs.job1"),
+					dyn.MustPathFromString("targets.target1.resources.jobs.job2"),
+				},
+			},
+		},
+		"two_jobs_in_target.job.yml": {
+			{
+				Severity: diag.Recommendation,
+				Summary:  "define a single job in a file with the .job.yml extension.",
+				Detail:   "The following resources are defined or configured in this file:\n - job1 (job)\n - job2 (job)\n",
+				Locations: []dyn.Location{
+					{File: filepath.FromSlash("testdata/format_not_match/two_jobs_in_target.job.yml"), Line: 6, Column: 11},
+					{File: filepath.FromSlash("testdata/format_not_match/two_jobs_in_target.job.yml"), Line: 8, Column: 11},
+				},
+				Paths: []dyn.Path{
+					dyn.MustPathFromString("targets.target1.resources.jobs.job1"),
+					dyn.MustPathFromString("targets.target1.resources.jobs.job2"),
+				},
+			},
+		},
+		"multiple_resources.model_serving_endpoint.yml": {
+			{
+				Severity: diag.Recommendation,
+				Summary:  "define a single model serving endpoint in a file with the .model_serving_endpoint.yml extension.",
+				Detail: `The following resources are defined or configured in this file:
+ - experiment1 (experiment)
+ - job1 (job)
+ - job2 (job)
+ - job3 (job)
+ - model1 (model)
+ - model_serving_endpoint1 (model_serving_endpoint)
+ - pipeline1 (pipeline)
+ - pipeline2 (pipeline)
+ - quality_monitor1 (quality_monitor)
+ - registered_model1 (registered_model)
+ - schema1 (schema)
+`,
+				Locations: []dyn.Location{
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 12, Column: 7},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 14, Column: 7},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 18, Column: 7},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 22, Column: 7},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 24, Column: 7},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 28, Column: 7},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 35, Column: 11},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 39, Column: 11},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 43, Column: 11},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 4, Column: 7},
+					{File: filepath.FromSlash("testdata/format_not_match/multiple_resources.model_serving_endpoint.yml"), Line: 8, Column: 7},
+				},
+				Paths: []dyn.Path{
+					dyn.MustPathFromString("resources.experiments.experiment1"),
+					dyn.MustPathFromString("resources.jobs.job1"),
+					dyn.MustPathFromString("resources.jobs.job2"),
+					dyn.MustPathFromString("resources.model_serving_endpoints.model_serving_endpoint1"),
+					dyn.MustPathFromString("resources.models.model1"),
+					dyn.MustPathFromString("resources.pipelines.pipeline1"),
+					dyn.MustPathFromString("resources.pipelines.pipeline2"),
+					dyn.MustPathFromString("resources.schemas.schema1"),
+					dyn.MustPathFromString("targets.target1.resources.jobs.job3"),
+					dyn.MustPathFromString("targets.target1.resources.quality_monitors.quality_monitor1"),
+					dyn.MustPathFromString("targets.target1.resources.registered_models.registered_model1"),
+				},
+			},
+		},
+	} {
+		t.Run(fileName, func(t *testing.T) {
+			b := &bundle.Bundle{
+				BundleRootPath: "testdata/format_not_match",
+				Config: config.Root{
+					Bundle: config.Bundle{
+						Name: "format_test",
+					},
+				},
+			}
+
+			m := loader.ProcessInclude(filepath.Join(b.BundleRootPath, fileName), fileName)
+			diags := bundle.Apply(context.Background(), b, m)
+			require.Len(t, diags, 1)
+			assert.Equal(t, expectedDiags, diags)
+		})
+	}
+}
@@ -47,7 +47,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 		}

 		// Anchor includes to the bundle root path.
-		matches, err := filepath.Glob(filepath.Join(b.RootPath, entry))
+		matches, err := filepath.Glob(filepath.Join(b.BundleRootPath, entry))
 		if err != nil {
 			return diag.FromErr(err)
 		}
@@ -61,7 +61,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 		// Filter matches to ones we haven't seen yet.
 		var includes []string
 		for _, match := range matches {
-			rel, err := filepath.Rel(b.RootPath, match)
+			rel, err := filepath.Rel(b.BundleRootPath, match)
 			if err != nil {
 				return diag.FromErr(err)
 			}
@@ -76,7 +76,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 		slices.Sort(includes)
 		files = append(files, includes...)
 		for _, include := range includes {
-			out = append(out, ProcessInclude(filepath.Join(b.RootPath, include), include))
+			out = append(out, ProcessInclude(filepath.Join(b.BundleRootPath, include), include))
 		}
 	}
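The include machinery above expands each `include:` glob relative to the bundle root and de-duplicates matches by their root-relative path. A self-contained sketch of that expansion, under the assumption that plain `filepath.Glob` semantics are all that matter for illustration:

```go
package main

import (
	"fmt"
	"path/filepath"
	"sort"
)

// expandIncludes resolves include globs against root and returns the
// sorted, de-duplicated set of root-relative matches.
func expandIncludes(root string, globs []string) ([]string, error) {
	seen := map[string]bool{}
	var out []string
	for _, g := range globs {
		// Anchor the glob at the root, mirroring the mutator above.
		matches, err := filepath.Glob(filepath.Join(root, g))
		if err != nil {
			return nil, err
		}
		for _, m := range matches {
			rel, err := filepath.Rel(root, m)
			if err != nil {
				return nil, err
			}
			if !seen[rel] {
				seen[rel] = true
				out = append(out, rel)
			}
		}
	}
	sort.Strings(out)
	return out, nil
}

func main() {
	files, err := expandIncludes(".", []string{"*.yml", "resources/*.yml"})
	fmt.Println(files, err)
}
```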
@@ -15,7 +15,7 @@ import (

 func TestProcessRootIncludesEmpty(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: ".",
+		BundleRootPath: ".",
 	}
 	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
 	require.NoError(t, diags.Error())
@@ -30,7 +30,7 @@ func TestProcessRootIncludesAbs(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: ".",
+		BundleRootPath: ".",
 		Config: config.Root{
 			Include: []string{
 				"/tmp/*.yml",
@@ -44,7 +44,7 @@ func TestProcessRootIncludesAbs(t *testing.T) {

 func TestProcessRootIncludesSingleGlob(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Include: []string{
 				"*.yml",
@@ -52,9 +52,9 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) {
 		},
 	}

-	testutil.Touch(t, b.RootPath, "databricks.yml")
-	testutil.Touch(t, b.RootPath, "a.yml")
-	testutil.Touch(t, b.RootPath, "b.yml")
+	testutil.Touch(t, b.BundleRootPath, "databricks.yml")
+	testutil.Touch(t, b.BundleRootPath, "a.yml")
+	testutil.Touch(t, b.BundleRootPath, "b.yml")

 	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
 	require.NoError(t, diags.Error())
@@ -63,7 +63,7 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) {

 func TestProcessRootIncludesMultiGlob(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Include: []string{
 				"a*.yml",
@@ -72,8 +72,8 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) {
 		},
 	}

-	testutil.Touch(t, b.RootPath, "a1.yml")
-	testutil.Touch(t, b.RootPath, "b1.yml")
+	testutil.Touch(t, b.BundleRootPath, "a1.yml")
+	testutil.Touch(t, b.BundleRootPath, "b1.yml")

 	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
 	require.NoError(t, diags.Error())
@@ -82,7 +82,7 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) {

 func TestProcessRootIncludesRemoveDups(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Include: []string{
 				"*.yml",
@@ -91,7 +91,7 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) {
 		},
 	}

-	testutil.Touch(t, b.RootPath, "a.yml")
+	testutil.Touch(t, b.BundleRootPath, "a.yml")

 	diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes())
 	require.NoError(t, diags.Error())
@@ -100,7 +100,7 @@ func TestProcessRootIncludesNotExists(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Include: []string{
 				"notexist.yml",
bundle/config/loader/testdata/format_match/job_and_pipeline.yml (new file)
@@ -0,0 +1,11 @@
+resources:
+  pipelines:
+    pipeline1:
+      name: pipeline1
+
+targets:
+  target1:
+    resources:
+      jobs:
+        job1:
+          name: job1
bundle/config/loader/testdata/format_match/multiple_resources.yml (new file)
@@ -0,0 +1,43 @@
+resources:
+  experiments:
+    experiment1:
+      name: experiment1
+
+  model_serving_endpoints:
+    model_serving_endpoint1:
+      name: model_serving_endpoint1
+
+  jobs:
+    job1:
+      name: job1
+    job2:
+      name: job2
+
+  models:
+    model1:
+      name: model1
+
+  pipelines:
+    pipeline1:
+      name: pipeline1
+    pipeline2:
+      name: pipeline2
+
+  schemas:
+    schema1:
+      name: schema1
+
+targets:
+  target1:
+    resources:
+      quality_monitors:
+        quality_monitor1:
+          baseline_table_name: quality_monitor1
+
+      jobs:
+        job3:
+          name: job3
+
+      registered_models:
+        registered_model1:
+          name: registered_model1
bundle/config/loader/testdata/format_match/one_job.job.yml (new file)
@@ -0,0 +1,11 @@
+resources:
+  jobs:
+    job1:
+      name: job1
+
+targets:
+  target1:
+    resources:
+      jobs:
+        job1:
+          description: job1
bundle/config/loader/testdata/format_match/one_pipeline.pipeline.yaml (new file)
@@ -0,0 +1,4 @@
+resources:
+  pipelines:
+    pipeline1:
+      name: pipeline1
bundle/config/loader/testdata/format_match/two_job.yml (new file)
@@ -0,0 +1,7 @@
+resources:
+  jobs:
+    job1:
+      name: job1
+
+    job2:
+      name: job2
bundle/config/loader/testdata/format_not_match/job_and_pipeline.experiment.yml (new file, vendored)
@@ -0,0 +1,11 @@
+resources:
+  pipelines:
+    pipeline1:
+      name: pipeline1
+
+targets:
+  target1:
+    resources:
+      jobs:
+        job1:
+          name: job1
|
@ -0,0 +1,11 @@
|
|||
resources:
|
||||
pipelines:
|
||||
pipeline1:
|
||||
name: pipeline1
|
||||
|
||||
targets:
|
||||
target1:
|
||||
resources:
|
||||
jobs:
|
||||
job1:
|
||||
name: job1
|
bundle/config/loader/testdata/format_not_match/multiple_resources.model_serving_endpoint.yml (new file, vendored)
@@ -0,0 +1,43 @@
+resources:
+  experiments:
+    experiment1:
+      name: experiment1
+
+  model_serving_endpoints:
+    model_serving_endpoint1:
+      name: model_serving_endpoint1
+
+  jobs:
+    job1:
+      name: job1
+    job2:
+      name: job2
+
+  models:
+    model1:
+      name: model1
+
+  pipelines:
+    pipeline1:
+      name: pipeline1
+    pipeline2:
+      name: pipeline2
+
+  schemas:
+    schema1:
+      name: schema1
+
+targets:
+  target1:
+    resources:
+      quality_monitors:
+        quality_monitor1:
+          baseline_table_name: quality_monitor1
+
+      jobs:
+        job3:
+          name: job3
+
+      registered_models:
+        registered_model1:
+          name: registered_model1
|
@ -0,0 +1,11 @@
|
|||
resources:
|
||||
jobs:
|
||||
job1:
|
||||
name: job1
|
||||
|
||||
targets:
|
||||
target1:
|
||||
resources:
|
||||
jobs:
|
||||
job2:
|
||||
name: job2
|
|
@ -0,0 +1,11 @@
|
|||
resources:
|
||||
jobs:
|
||||
job1:
|
||||
name: job1
|
||||
|
||||
targets:
|
||||
target1:
|
||||
resources:
|
||||
jobs:
|
||||
job1:
|
||||
description: job1
|
|
@ -0,0 +1,7 @@
|
|||
resources:
|
||||
jobs:
|
||||
job1:
|
||||
name: job1
|
||||
|
||||
job2:
|
||||
name: job2
|
|
@ -0,0 +1,8 @@
|
|||
targets:
|
||||
target1:
|
||||
resources:
|
||||
jobs:
|
||||
job1:
|
||||
description: job1
|
||||
job2:
|
||||
description: job2
|
|
@@ -35,8 +35,10 @@ func (m *applyPresets) Name() string {
 }

 func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	var diags diag.Diagnostics
+
 	if d := validatePauseStatus(b); d != nil {
-		return d
+		diags = diags.Extend(d)
 	}

 	r := b.Config.Resources
@@ -45,7 +47,11 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	tags := toTagArray(t.Tags)

 	// Jobs presets: Prefix, Tags, JobsMaxConcurrentRuns, TriggerPauseStatus
-	for _, j := range r.Jobs {
+	for key, j := range r.Jobs {
+		if j.JobSettings == nil {
+			diags = diags.Extend(diag.Errorf("job %s is not defined", key))
+			continue
+		}
 		j.Name = prefix + j.Name
 		if j.Tags == nil {
 			j.Tags = make(map[string]string)
@@ -77,20 +83,27 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	}

 	// Pipelines presets: Prefix, PipelinesDevelopment
-	for i := range r.Pipelines {
-		r.Pipelines[i].Name = prefix + r.Pipelines[i].Name
+	for key, p := range r.Pipelines {
+		if p.PipelineSpec == nil {
+			diags = diags.Extend(diag.Errorf("pipeline %s is not defined", key))
+			continue
+		}
+		p.Name = prefix + p.Name
 		if config.IsExplicitlyEnabled(t.PipelinesDevelopment) {
-			r.Pipelines[i].Development = true
+			p.Development = true
 		}
 		if t.TriggerPauseStatus == config.Paused {
-			r.Pipelines[i].Continuous = false
+			p.Continuous = false
 		}
 		// As of 2024-06, pipelines don't yet support tags
 	}

 	// Models presets: Prefix, Tags
-	for _, m := range r.Models {
+	for key, m := range r.Models {
+		if m.Model == nil {
+			diags = diags.Extend(diag.Errorf("model %s is not defined", key))
+			continue
+		}
 		m.Name = prefix + m.Name
 		for _, t := range tags {
 			exists := slices.ContainsFunc(m.Tags, func(modelTag ml.ModelTag) bool {
@@ -104,7 +117,11 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	}

 	// Experiments presets: Prefix, Tags
-	for _, e := range r.Experiments {
+	for key, e := range r.Experiments {
+		if e.Experiment == nil {
+			diags = diags.Extend(diag.Errorf("experiment %s is not defined", key))
+			continue
+		}
 		filepath := e.Name
 		dir := path.Dir(filepath)
 		base := path.Base(filepath)
@@ -128,34 +145,50 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	}

 	// Model serving endpoint presets: Prefix
-	for i := range r.ModelServingEndpoints {
-		r.ModelServingEndpoints[i].Name = normalizePrefix(prefix) + r.ModelServingEndpoints[i].Name
+	for key, e := range r.ModelServingEndpoints {
+		if e.CreateServingEndpoint == nil {
+			diags = diags.Extend(diag.Errorf("model serving endpoint %s is not defined", key))
+			continue
+		}
+		e.Name = normalizePrefix(prefix) + e.Name

 		// As of 2024-06, model serving endpoints don't yet support tags
 	}

 	// Registered models presets: Prefix
-	for i := range r.RegisteredModels {
-		r.RegisteredModels[i].Name = normalizePrefix(prefix) + r.RegisteredModels[i].Name
+	for key, m := range r.RegisteredModels {
+		if m.CreateRegisteredModelRequest == nil {
+			diags = diags.Extend(diag.Errorf("registered model %s is not defined", key))
+			continue
+		}
+		m.Name = normalizePrefix(prefix) + m.Name

 		// As of 2024-06, registered models don't yet support tags
 	}

-	// Quality monitors presets: Prefix
+	// Quality monitors presets: Schedule
 	if t.TriggerPauseStatus == config.Paused {
-		for i := range r.QualityMonitors {
+		for key, q := range r.QualityMonitors {
+			if q.CreateMonitor == nil {
+				diags = diags.Extend(diag.Errorf("quality monitor %s is not defined", key))
+				continue
+			}
 			// Remove all schedules from monitors, since they don't support pausing/unpausing.
 			// Quality monitors might support the "pause" property in the future, so at the
 			// CLI level we do respect that property if it is set to "unpaused."
-			if r.QualityMonitors[i].Schedule != nil && r.QualityMonitors[i].Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused {
-				r.QualityMonitors[i].Schedule = nil
+			if q.Schedule != nil && q.Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused {
+				q.Schedule = nil
 			}
 		}
 	}

 	// Schemas: Prefix
-	for i := range r.Schemas {
-		r.Schemas[i].Name = normalizePrefix(prefix) + r.Schemas[i].Name
+	for key, s := range r.Schemas {
+		if s.CreateSchema == nil {
+			diags = diags.Extend(diag.Errorf("schema %s is not defined", key))
+			continue
+		}
+		s.Name = normalizePrefix(prefix) + s.Name
 		// HTTP API for schemas doesn't yet support tags. It's only supported in
 		// the Databricks UI and via the SQL API.
 	}
@@ -167,7 +200,26 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 		// the Databricks UI and via the SQL API.
 	}

-	return nil
+	// Clusters: Prefix, Tags
+	for key, c := range r.Clusters {
+		if c.ClusterSpec == nil {
+			diags = diags.Extend(diag.Errorf("cluster %s is not defined", key))
+			continue
+		}
+		c.ClusterName = prefix + c.ClusterName
+		if c.CustomTags == nil {
+			c.CustomTags = make(map[string]string)
+		}
+		for _, tag := range tags {
+			normalisedKey := b.Tagging.NormalizeKey(tag.Key)
+			normalisedValue := b.Tagging.NormalizeValue(tag.Value)
+			if _, ok := c.CustomTags[normalisedKey]; !ok {
+				c.CustomTags[normalisedKey] = normalisedValue
+			}
+		}
+	}
+
+	return diags
 }

 func validatePauseStatus(b *bundle.Bundle) diag.Diagnostics {
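The presets mutator now guards every resource loop the same way: a map entry whose embedded spec pointer is nil yields a `<type> <key> is not defined` error and is skipped instead of being dereferenced (the panic fixed in #1796). A generic sketch of the pattern, with simplified stand-in types rather than the real resource structs:

```go
package main

import (
	"errors"
	"fmt"
)

// JobSettings stands in for a resource spec that may be nil when the
// YAML declared the key but no body, e.g. "jobs:\n  job1:".
type JobSettings struct{ Name string }

type Job struct{ Settings *JobSettings }

// applyPrefix demonstrates the guard-and-continue pattern: collect an
// error per undefined resource instead of panicking on a nil pointer.
func applyPrefix(prefix string, jobs map[string]*Job) []error {
	var errs []error
	for key, j := range jobs {
		if j.Settings == nil {
			errs = append(errs, errors.New("job "+key+" is not defined"))
			continue
		}
		j.Settings.Name = prefix + j.Settings.Name
	}
	return errs
}

func main() {
	jobs := map[string]*Job{
		"job1": {},                                  // declared but empty
		"job2": {Settings: &JobSettings{Name: "x"}}, // fully defined
	}
	for _, err := range applyPrefix("dev-", jobs) {
		fmt.Println(err)
	}
	fmt.Println(jobs["job2"].Settings.Name) // dev-x
}
```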
@@ -307,3 +307,116 @@ func TestApplyPresetsJobsMaxConcurrentRuns(t *testing.T) {
 		})
 	}
 }
+
+func TestApplyPresetsPrefixWithoutJobSettings(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job1": {}, // no jobsettings inside
+				},
+			},
+			Presets: config.Presets{
+				NamePrefix: "prefix-",
+			},
+		},
+	}
+
+	ctx := context.Background()
+	diags := bundle.Apply(ctx, b, mutator.ApplyPresets())
+
+	require.ErrorContains(t, diags.Error(), "job job1 is not defined")
+}
+
+func TestApplyPresetsResourceNotDefined(t *testing.T) {
+	tests := []struct {
+		resources config.Resources
+		error     string
+	}{
+		{
+			resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job1": {}, // no jobsettings inside
+				},
+			},
+			error: "job job1 is not defined",
+		},
+		{
+			resources: config.Resources{
+				Pipelines: map[string]*resources.Pipeline{
+					"pipeline1": {}, // no pipelinespec inside
+				},
+			},
+			error: "pipeline pipeline1 is not defined",
+		},
+		{
+			resources: config.Resources{
+				Models: map[string]*resources.MlflowModel{
+					"model1": {}, // no model inside
+				},
+			},
+			error: "model model1 is not defined",
+		},
+		{
+			resources: config.Resources{
+				Experiments: map[string]*resources.MlflowExperiment{
+					"experiment1": {}, // no experiment inside
+				},
+			},
+			error: "experiment experiment1 is not defined",
+		},
+		{
+			resources: config.Resources{
+				ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{
+					"endpoint1": {}, // no CreateServingEndpoint inside
+				},
+				RegisteredModels: map[string]*resources.RegisteredModel{
+					"model1": {}, // no CreateRegisteredModelRequest inside
+				},
+			},
+			error: "model serving endpoint endpoint1 is not defined",
+		},
+		{
+			resources: config.Resources{
+				QualityMonitors: map[string]*resources.QualityMonitor{
+					"monitor1": {}, // no CreateMonitor inside
+				},
+			},
+			error: "quality monitor monitor1 is not defined",
+		},
+		{
+			resources: config.Resources{
+				Schemas: map[string]*resources.Schema{
+					"schema1": {}, // no CreateSchema inside
+				},
+			},
+			error: "schema schema1 is not defined",
+		},
+		{
+			resources: config.Resources{
+				Clusters: map[string]*resources.Cluster{
+					"cluster1": {}, // no ClusterSpec inside
+				},
+			},
+			error: "cluster cluster1 is not defined",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.error, func(t *testing.T) {
+			b := &bundle.Bundle{
+				Config: config.Root{
+					Resources: tt.resources,
+					Presets: config.Presets{
+						TriggerPauseStatus: config.Paused,
+					},
+				},
+			}
+
+			ctx := context.Background()
+			diags := bundle.Apply(ctx, b, mutator.ApplyPresets())
+
+			require.ErrorContains(t, diags.Error(), tt.error)
+		})
+	}
+}
@@ -0,0 +1,87 @@
+package mutator
+
+import (
+	"context"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+)
+
+type computeIdToClusterId struct{}
+
+func ComputeIdToClusterId() bundle.Mutator {
+	return &computeIdToClusterId{}
+}
+
+func (m *computeIdToClusterId) Name() string {
+	return "ComputeIdToClusterId"
+}
+
+func (m *computeIdToClusterId) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
+	var diags diag.Diagnostics
+
+	// The "compute_id" key is set; rewrite it to "cluster_id".
+	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+		v, d := rewriteComputeIdToClusterId(v, dyn.NewPath(dyn.Key("bundle")))
+		diags = diags.Extend(d)
+
+		// Check if the "compute_id" key is set in any target overrides.
+		return dyn.MapByPattern(v, dyn.NewPattern(dyn.Key("targets"), dyn.AnyKey()), func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+			v, d := rewriteComputeIdToClusterId(v, dyn.Path{})
+			diags = diags.Extend(d)
+			return v, nil
+		})
+	})
+
+	diags = diags.Extend(diag.FromErr(err))
+	return diags
+}
+
+func rewriteComputeIdToClusterId(v dyn.Value, p dyn.Path) (dyn.Value, diag.Diagnostics) {
+	var diags diag.Diagnostics
+	computeIdPath := p.Append(dyn.Key("compute_id"))
+	computeId, err := dyn.GetByPath(v, computeIdPath)
+
+	// If the "compute_id" key is not set, we don't need to do anything.
+	if err != nil {
+		return v, nil
+	}
+
+	if computeId.Kind() == dyn.KindInvalid {
+		return v, nil
+	}
+
+	diags = diags.Append(diag.Diagnostic{
+		Severity:  diag.Warning,
+		Summary:   "compute_id is deprecated, please use cluster_id instead",
+		Locations: computeId.Locations(),
+		Paths:     []dyn.Path{computeIdPath},
+	})
+
+	clusterIdPath := p.Append(dyn.Key("cluster_id"))
+	nv, err := dyn.SetByPath(v, clusterIdPath, computeId)
+	if err != nil {
+		return dyn.InvalidValue, diag.FromErr(err)
+	}
+	// Drop the "compute_id" key.
+	vout, err := dyn.Walk(nv, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+		switch len(p) {
+		case 0:
+			return v, nil
+		case 1:
+			if p[0] == dyn.Key("compute_id") {
+				return v, dyn.ErrDrop
+			}
+			return v, nil
+		case 2:
+			if p[1] == dyn.Key("compute_id") {
+				return v, dyn.ErrDrop
+			}
+		}
+		return v, dyn.ErrSkip
+	})
+
+	diags = diags.Extend(diag.FromErr(err))
+	return vout, diags
+}
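At its core, the mutator above is a key rename with a deprecation warning, applied once at the bundle level and once per target override. The same idea in miniature, on a plain map instead of the CLI's `dyn.Value` tree (names here are illustrative):

```go
package main

import "fmt"

// renameKey moves m[oldKey] to m[newKey] and returns a deprecation
// warning when a rewrite happened; it mirrors the mutator above.
func renameKey(m map[string]any, oldKey, newKey string) string {
	v, ok := m[oldKey]
	if !ok {
		return ""
	}
	delete(m, oldKey)
	m[newKey] = v
	return fmt.Sprintf("%s is deprecated, please use %s instead", oldKey, newKey)
}

func main() {
	bundleCfg := map[string]any{"compute_id": "1234-567890-abcde123"}
	if w := renameKey(bundleCfg, "compute_id", "cluster_id"); w != "" {
		fmt.Println("warning:", w)
	}
	fmt.Println(bundleCfg) // map[cluster_id:1234-567890-abcde123]
}
```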
@@ -0,0 +1,57 @@
+package mutator_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/mutator"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestComputeIdToClusterId(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Bundle: config.Bundle{
+				ComputeId: "compute-id",
+			},
+		},
+	}
+
+	diags := bundle.Apply(context.Background(), b, mutator.ComputeIdToClusterId())
+	assert.NoError(t, diags.Error())
+	assert.Equal(t, "compute-id", b.Config.Bundle.ClusterId)
+	assert.Empty(t, b.Config.Bundle.ComputeId)
+
+	assert.Len(t, diags, 1)
+	assert.Equal(t, "compute_id is deprecated, please use cluster_id instead", diags[0].Summary)
+	assert.Equal(t, diag.Warning, diags[0].Severity)
+}
+
+func TestComputeIdToClusterIdInTargetOverride(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Targets: map[string]*config.Target{
+				"dev": {
+					ComputeId: "compute-id-dev",
+				},
+			},
+		},
+	}
+
+	diags := bundle.Apply(context.Background(), b, mutator.ComputeIdToClusterId())
+	assert.NoError(t, diags.Error())
+	assert.Empty(t, b.Config.Targets["dev"].ComputeId)
+
+	diags = diags.Extend(bundle.Apply(context.Background(), b, mutator.SelectTarget("dev")))
+	assert.NoError(t, diags.Error())
+
+	assert.Equal(t, "compute-id-dev", b.Config.Bundle.ClusterId)
+	assert.Empty(t, b.Config.Bundle.ComputeId)
+
+	assert.Len(t, diags, 1)
+	assert.Equal(t, "compute_id is deprecated, please use cluster_id instead", diags[0].Summary)
+	assert.Equal(t, diag.Warning, diags[0].Severity)
+}
@@ -29,6 +29,10 @@ func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundl
		b.Config.Workspace.FilePath = path.Join(root, "files")
	}

	if b.Config.Workspace.ResourcePath == "" {
		b.Config.Workspace.ResourcePath = path.Join(root, "resources")
	}

	if b.Config.Workspace.ArtifactPath == "" {
		b.Config.Workspace.ArtifactPath = path.Join(root, "artifacts")
	}

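For a given workspace root, the defaults above always produce the same layout. A stdlib-only sketch (the root value is an assumed example):

    package main

    import (
        "fmt"
        "path"
    )

    func main() {
        root := "/Workspace/Users/jane@doe.com/.bundle/test/dev" // assumed example root
        for _, dir := range []string{"files", "resources", "artifacts", "state"} {
            fmt.Println(path.Join(root, dir))
        }
    }
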
@@ -22,6 +22,7 @@ func TestDefineDefaultWorkspacePaths(t *testing.T) {
	diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths())
	require.NoError(t, diags.Error())
	assert.Equal(t, "/files", b.Config.Workspace.FilePath)
	assert.Equal(t, "/resources", b.Config.Workspace.ResourcePath)
	assert.Equal(t, "/artifacts", b.Config.Workspace.ArtifactPath)
	assert.Equal(t, "/state", b.Config.Workspace.StatePath)
}

@@ -32,6 +33,7 @@ func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) {
		Workspace: config.Workspace{
			RootPath:     "/",
			FilePath:     "/foo/bar",
			ResourcePath: "/foo/bar",
			ArtifactPath: "/foo/bar",
			StatePath:    "/foo/bar",
		},

@@ -40,6 +42,7 @@ func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) {
	diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths())
	require.NoError(t, diags.Error())
	assert.Equal(t, "/foo/bar", b.Config.Workspace.FilePath)
	assert.Equal(t, "/foo/bar", b.Config.Workspace.ResourcePath)
	assert.Equal(t, "/foo/bar", b.Config.Workspace.ArtifactPath)
	assert.Equal(t, "/foo/bar", b.Config.Workspace.StatePath)
}

@@ -10,6 +10,7 @@ import (
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/cli/bundle/internal/bundletest"
	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/pipelines"
	"github.com/stretchr/testify/require"

@@ -41,7 +42,7 @@ func TestExpandGlobPathsInPipelines(t *testing.T) {
	touchEmptyFile(t, filepath.Join(dir, "skip/test7.py"))

	b := &bundle.Bundle{
		RootPath: dir,
		BundleRootPath: dir,
		Config: config.Root{
			Resources: config.Resources{
				Pipelines: map[string]*resources.Pipeline{

@@ -105,8 +106,8 @@ func TestExpandGlobPathsInPipelines(t *testing.T) {
		},
	}

	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
	bundletest.SetLocation(b, "resources.pipelines.pipeline.libraries[3]", filepath.Join(dir, "relative", "resource.yml"))
	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})
	bundletest.SetLocation(b, "resources.pipelines.pipeline.libraries[3]", []dyn.Location{{File: filepath.Join(dir, "relative", "resource.yml")}})

	m := ExpandPipelineGlobPaths()
	diags := bundle.Apply(context.Background(), b, m)

@@ -33,7 +33,7 @@ func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.
	}

	if strings.HasPrefix(root, "~/") {
		home := fmt.Sprintf("/Users/%s", currentUser.UserName)
		home := fmt.Sprintf("/Workspace/Users/%s", currentUser.UserName)
		b.Config.Workspace.RootPath = path.Join(home, root[2:])
	}

@@ -27,7 +27,7 @@ func TestExpandWorkspaceRoot(t *testing.T) {
	}
	diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot())
	require.NoError(t, diags.Error())
	assert.Equal(t, "/Users/jane@doe.com/foo", b.Config.Workspace.RootPath)
	assert.Equal(t, "/Workspace/Users/jane@doe.com/foo", b.Config.Workspace.RootPath)
}

func TestExpandWorkspaceRootDoesNothing(t *testing.T) {

@@ -56,7 +56,7 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn
	}

	// Compute relative path of the bundle root from the Git repo root.
	absBundlePath, err := filepath.Abs(b.RootPath)
	absBundlePath, err := filepath.Abs(b.BundleRootPath)
	if err != nil {
		return diag.FromErr(err)
	}

@@ -23,6 +23,7 @@ func DefaultMutators() []bundle.Mutator {
		VerifyCliVersion(),

		EnvironmentsToTargets(),
		ComputeIdToClusterId(),
		InitializeVariables(),
		DefineDefaultTarget(),
		LoadGitDetails(),

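ComputeIdToClusterId is registered right after EnvironmentsToTargets so the deprecated key is rewritten before later mutators read the cluster configuration. A minimal sketch (applyDefaults is a hypothetical helper, not part of this change) of running the default chain as one sequence:

    package example

    import (
        "context"

        "github.com/databricks/cli/bundle"
        "github.com/databricks/cli/bundle/config/mutator"
        "github.com/databricks/cli/libs/diag"
    )

    // applyDefaults runs every default mutator in order.
    func applyDefaults(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
        return bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...))
    }
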
@@ -39,22 +39,22 @@ func overrideJobCompute(j *resources.Job, compute string) {

func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	if b.Config.Bundle.Mode != config.Development {
		if b.Config.Bundle.ComputeID != "" {
		if b.Config.Bundle.ClusterId != "" {
			return diag.Errorf("cannot override compute for a target that does not use 'mode: development'")
		}
		return nil
	}
	if v := env.Get(ctx, "DATABRICKS_CLUSTER_ID"); v != "" {
		b.Config.Bundle.ComputeID = v
		b.Config.Bundle.ClusterId = v
	}

	if b.Config.Bundle.ComputeID == "" {
	if b.Config.Bundle.ClusterId == "" {
		return nil
	}

	r := b.Config.Resources
	for i := range r.Jobs {
		overrideJobCompute(r.Jobs[i], b.Config.Bundle.ComputeID)
		overrideJobCompute(r.Jobs[i], b.Config.Bundle.ClusterId)
	}

	return nil

@@ -20,7 +20,7 @@ func TestOverrideDevelopment(t *testing.T) {
		Config: config.Root{
			Bundle: config.Bundle{
				Mode: config.Development,
				ComputeID: "newClusterID",
				ClusterId: "newClusterID",
			},
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{

@@ -144,7 +144,7 @@ func TestOverrideProduction(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Bundle: config.Bundle{
				ComputeID: "newClusterID",
				ClusterId: "newClusterID",
			},
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{

@@ -0,0 +1,115 @@
package paths

import (
	"github.com/databricks/cli/bundle/libraries"
	"github.com/databricks/cli/libs/dyn"
)

type jobRewritePattern struct {
	pattern     dyn.Pattern
	kind        PathKind
	skipRewrite func(string) bool
}

func noSkipRewrite(string) bool {
	return false
}

func jobTaskRewritePatterns(base dyn.Pattern) []jobRewritePattern {
	return []jobRewritePattern{
		{
			base.Append(dyn.Key("notebook_task"), dyn.Key("notebook_path")),
			PathKindNotebook,
			noSkipRewrite,
		},
		{
			base.Append(dyn.Key("spark_python_task"), dyn.Key("python_file")),
			PathKindWorkspaceFile,
			noSkipRewrite,
		},
		{
			base.Append(dyn.Key("dbt_task"), dyn.Key("project_directory")),
			PathKindDirectory,
			noSkipRewrite,
		},
		{
			base.Append(dyn.Key("sql_task"), dyn.Key("file"), dyn.Key("path")),
			PathKindWorkspaceFile,
			noSkipRewrite,
		},
		{
			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("whl")),
			PathKindLibrary,
			noSkipRewrite,
		},
		{
			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("jar")),
			PathKindLibrary,
			noSkipRewrite,
		},
		{
			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("requirements")),
			PathKindWorkspaceFile,
			noSkipRewrite,
		},
	}
}

func jobRewritePatterns() []jobRewritePattern {
	// Base pattern to match all tasks in all jobs.
	base := dyn.NewPattern(
		dyn.Key("resources"),
		dyn.Key("jobs"),
		dyn.AnyKey(),
		dyn.Key("tasks"),
		dyn.AnyIndex(),
	)

	// Compile list of patterns and their respective rewrite functions.
	jobEnvironmentsPatterns := []jobRewritePattern{
		{
			dyn.NewPattern(
				dyn.Key("resources"),
				dyn.Key("jobs"),
				dyn.AnyKey(),
				dyn.Key("environments"),
				dyn.AnyIndex(),
				dyn.Key("spec"),
				dyn.Key("dependencies"),
				dyn.AnyIndex(),
			),
			PathKindWithPrefix,
			func(s string) bool {
				return !libraries.IsLibraryLocal(s)
			},
		},
	}

	taskPatterns := jobTaskRewritePatterns(base)
	forEachPatterns := jobTaskRewritePatterns(base.Append(dyn.Key("for_each_task"), dyn.Key("task")))
	allPatterns := append(taskPatterns, jobEnvironmentsPatterns...)
	allPatterns = append(allPatterns, forEachPatterns...)
	return allPatterns
}

// VisitJobPaths visits all paths in job resources and applies a function to each path.
func VisitJobPaths(value dyn.Value, fn VisitFunc) (dyn.Value, error) {
	var err error
	var newValue = value

	for _, rewritePattern := range jobRewritePatterns() {
		newValue, err = dyn.MapByPattern(newValue, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
			if rewritePattern.skipRewrite(v.MustString()) {
				return v, nil
			}

			return fn(p, rewritePattern.kind, v)
		})

		if err != nil {
			return dyn.InvalidValue, err
		}
	}

	return newValue, nil
}

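A minimal usage sketch (assumed, not part of this change): prefix every notebook path the visitor reports, leaving all other path kinds untouched.

    func prefixNotebookPaths(v dyn.Value) (dyn.Value, error) {
        return VisitJobPaths(v, func(p dyn.Path, kind PathKind, v dyn.Value) (dyn.Value, error) {
            if kind != PathKindNotebook {
                return v, nil
            }
            // Prepend an assumed workspace prefix to the notebook path.
            return dyn.NewValue("/Workspace"+v.MustString(), v.Locations()), nil
        })
    }
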
@@ -0,0 +1,168 @@
package paths

import (
	"testing"

	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/cli/libs/dyn"
	assert "github.com/databricks/cli/libs/dyn/dynassert"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/stretchr/testify/require"
)

func TestVisitJobPaths(t *testing.T) {
	task0 := jobs.Task{
		NotebookTask: &jobs.NotebookTask{
			NotebookPath: "abc",
		},
	}
	task1 := jobs.Task{
		SparkPythonTask: &jobs.SparkPythonTask{
			PythonFile: "abc",
		},
	}
	task2 := jobs.Task{
		DbtTask: &jobs.DbtTask{
			ProjectDirectory: "abc",
		},
	}
	task3 := jobs.Task{
		SqlTask: &jobs.SqlTask{
			File: &jobs.SqlTaskFile{
				Path: "abc",
			},
		},
	}
	task4 := jobs.Task{
		Libraries: []compute.Library{
			{Whl: "dist/foo.whl"},
		},
	}
	task5 := jobs.Task{
		Libraries: []compute.Library{
			{Jar: "dist/foo.jar"},
		},
	}
	task6 := jobs.Task{
		Libraries: []compute.Library{
			{Requirements: "requirements.txt"},
		},
	}

	job0 := &resources.Job{
		JobSettings: &jobs.JobSettings{
			Tasks: []jobs.Task{
				task0,
				task1,
				task2,
				task3,
				task4,
				task5,
				task6,
			},
		},
	}

	root := config.Root{
		Resources: config.Resources{
			Jobs: map[string]*resources.Job{
				"job0": job0,
			},
		},
	}

	actual := visitJobPaths(t, root)
	expected := []dyn.Path{
		dyn.MustPathFromString("resources.jobs.job0.tasks[0].notebook_task.notebook_path"),
		dyn.MustPathFromString("resources.jobs.job0.tasks[1].spark_python_task.python_file"),
		dyn.MustPathFromString("resources.jobs.job0.tasks[2].dbt_task.project_directory"),
		dyn.MustPathFromString("resources.jobs.job0.tasks[3].sql_task.file.path"),
		dyn.MustPathFromString("resources.jobs.job0.tasks[4].libraries[0].whl"),
		dyn.MustPathFromString("resources.jobs.job0.tasks[5].libraries[0].jar"),
		dyn.MustPathFromString("resources.jobs.job0.tasks[6].libraries[0].requirements"),
	}

	assert.ElementsMatch(t, expected, actual)
}

func TestVisitJobPaths_environments(t *testing.T) {
	environment0 := jobs.JobEnvironment{
		Spec: &compute.Environment{
			Dependencies: []string{
				"dist_0/*.whl",
				"dist_1/*.whl",
			},
		},
	}
	job0 := &resources.Job{
		JobSettings: &jobs.JobSettings{
			Environments: []jobs.JobEnvironment{
				environment0,
			},
		},
	}

	root := config.Root{
		Resources: config.Resources{
			Jobs: map[string]*resources.Job{
				"job0": job0,
			},
		},
	}

	actual := visitJobPaths(t, root)
	expected := []dyn.Path{
		dyn.MustPathFromString("resources.jobs.job0.environments[0].spec.dependencies[0]"),
		dyn.MustPathFromString("resources.jobs.job0.environments[0].spec.dependencies[1]"),
	}

	assert.ElementsMatch(t, expected, actual)
}

func TestVisitJobPaths_foreach(t *testing.T) {
	task0 := jobs.Task{
		ForEachTask: &jobs.ForEachTask{
			Task: jobs.Task{
				NotebookTask: &jobs.NotebookTask{
					NotebookPath: "abc",
				},
			},
		},
	}
	job0 := &resources.Job{
		JobSettings: &jobs.JobSettings{
			Tasks: []jobs.Task{
				task0,
			},
		},
	}

	root := config.Root{
		Resources: config.Resources{
			Jobs: map[string]*resources.Job{
				"job0": job0,
			},
		},
	}

	actual := visitJobPaths(t, root)
	expected := []dyn.Path{
		dyn.MustPathFromString("resources.jobs.job0.tasks[0].for_each_task.task.notebook_task.notebook_path"),
	}

	assert.ElementsMatch(t, expected, actual)
}

func visitJobPaths(t *testing.T, root config.Root) []dyn.Path {
	var actual []dyn.Path
	err := root.Mutate(func(value dyn.Value) (dyn.Value, error) {
		return VisitJobPaths(value, func(p dyn.Path, kind PathKind, v dyn.Value) (dyn.Value, error) {
			actual = append(actual, p)
			return v, nil
		})
	})
	require.NoError(t, err)
	return actual
}

@@ -0,0 +1,26 @@
package paths

import "github.com/databricks/cli/libs/dyn"

type PathKind int

const (
	// PathKindLibrary is a path to a library file.
	PathKindLibrary = iota

	// PathKindNotebook is a path to a notebook file.
	PathKindNotebook

	// PathKindWorkspaceFile is a path to a regular workspace file;
	// notebooks are not allowed because they are uploaded as a special
	// kind of workspace object.
	PathKindWorkspaceFile

	// PathKindWithPrefix is a path that starts with './'.
	PathKindWithPrefix

	// PathKindDirectory is a path to a directory.
	PathKindDirectory
)

type VisitFunc func(path dyn.Path, kind PathKind, value dyn.Value) (dyn.Value, error)

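A sketch of dispatching on PathKind (the String method is illustrative, not part of this change); the translate-paths mutator later in this diff uses the same switch shape to pick a rewrite function:

    func (k PathKind) String() string {
        switch k {
        case PathKindLibrary:
            return "library"
        case PathKindNotebook:
            return "notebook"
        case PathKindWorkspaceFile:
            return "workspace file"
        case PathKindWithPrefix:
            return "path with './' prefix"
        case PathKindDirectory:
            return "directory"
        default:
            return "unknown"
        }
    }
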
@@ -5,8 +5,8 @@ import (

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/libs/auth"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/iamutil"
	"github.com/databricks/cli/libs/tags"
)

@@ -33,7 +33,7 @@ func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) diag.
	}

	b.Config.Workspace.CurrentUser = &config.User{
		ShortName: auth.GetShortUserName(me.UserName),
		ShortName: iamutil.GetShortUserName(me),
		User:      me,
	}

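The call site above changes from passing a user name string to passing the full user object. A minimal sketch of the new call (the printed short name is an assumed example of iamutil's behavior, not taken from the diff):

    package main

    import (
        "fmt"

        "github.com/databricks/cli/libs/iamutil"
        "github.com/databricks/databricks-sdk-go/service/iam"
    )

    func main() {
        me := &iam.User{UserName: "jane@doe.com"}
        // Assumed output: a short, template-friendly name such as "jane".
        fmt.Println(iamutil.GetShortUserName(me))
    }
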
@@ -0,0 +1,67 @@
package mutator

import (
	"context"
	"fmt"
	"strings"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
)

type prependWorkspacePrefix struct{}

// PrependWorkspacePrefix prepends the /Workspace prefix to the workspace paths
// in the bundle configuration.
func PrependWorkspacePrefix() bundle.Mutator {
	return &prependWorkspacePrefix{}
}

func (m *prependWorkspacePrefix) Name() string {
	return "PrependWorkspacePrefix"
}

var skipPrefixes = []string{
	"/Workspace/",
	"/Volumes/",
}

func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	patterns := []dyn.Pattern{
		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("root_path")),
		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("file_path")),
		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("artifact_path")),
		dyn.NewPattern(dyn.Key("workspace"), dyn.Key("state_path")),
	}

	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
		var err error
		for _, pattern := range patterns {
			v, err = dyn.MapByPattern(v, pattern, func(p dyn.Path, pv dyn.Value) (dyn.Value, error) {
				path, ok := pv.AsString()
				if !ok {
					return dyn.InvalidValue, fmt.Errorf("expected string, got %s", pv.Kind())
				}

				for _, prefix := range skipPrefixes {
					if strings.HasPrefix(path, prefix) {
						return pv, nil
					}
				}

				return dyn.NewValue(fmt.Sprintf("/Workspace%s", path), pv.Locations()), nil
			})

			if err != nil {
				return dyn.InvalidValue, err
			}
		}
		return v, nil
	})

	if err != nil {
		return diag.FromErr(err)
	}

	return nil
}

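A stdlib-only sketch of the guard above (needsWorkspacePrefix is a hypothetical helper name): paths already under one of the skip prefixes are left untouched.

    func needsWorkspacePrefix(path string) bool {
        for _, prefix := range []string{"/Workspace/", "/Volumes/"} {
            if strings.HasPrefix(path, prefix) {
                return false
            }
        }
        return true
    }
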
@@ -0,0 +1,79 @@
package mutator

import (
	"context"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/databricks-sdk-go/service/iam"
	"github.com/stretchr/testify/require"
)

func TestPrependWorkspacePrefix(t *testing.T) {
	testCases := []struct {
		path     string
		expected string
	}{
		{
			path:     "/Users/test",
			expected: "/Workspace/Users/test",
		},
		{
			path:     "/Shared/test",
			expected: "/Workspace/Shared/test",
		},
		{
			path:     "/Workspace/Users/test",
			expected: "/Workspace/Users/test",
		},
		{
			path:     "/Volumes/Users/test",
			expected: "/Volumes/Users/test",
		},
	}

	for _, tc := range testCases {
		b := &bundle.Bundle{
			Config: config.Root{
				Workspace: config.Workspace{
					RootPath:     tc.path,
					ArtifactPath: tc.path,
					FilePath:     tc.path,
					StatePath:    tc.path,
				},
			},
		}

		diags := bundle.Apply(context.Background(), b, PrependWorkspacePrefix())
		require.Empty(t, diags)
		require.Equal(t, tc.expected, b.Config.Workspace.RootPath)
		require.Equal(t, tc.expected, b.Config.Workspace.ArtifactPath)
		require.Equal(t, tc.expected, b.Config.Workspace.FilePath)
		require.Equal(t, tc.expected, b.Config.Workspace.StatePath)
	}
}

func TestPrependWorkspaceForDefaultConfig(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Bundle: config.Bundle{
				Name:   "test",
				Target: "dev",
			},
			Workspace: config.Workspace{
				CurrentUser: &config.User{
					User: &iam.User{
						UserName: "jane@doe.com",
					},
				},
			},
		},
	}
	diags := bundle.Apply(context.Background(), b, bundle.Seq(DefineDefaultWorkspaceRoot(), ExpandWorkspaceRoot(), DefineDefaultWorkspacePaths(), PrependWorkspacePrefix()))
	require.Empty(t, diags)
	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev", b.Config.Workspace.RootPath)
	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/artifacts", b.Config.Workspace.ArtifactPath)
	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/files", b.Config.Workspace.FilePath)
	require.Equal(t, "/Workspace/Users/jane@doe.com/.bundle/test/dev/state", b.Config.Workspace.StatePath)
}

@@ -6,9 +6,9 @@ import (

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/libs/auth"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/iamutil"
	"github.com/databricks/cli/libs/log"
)

@@ -118,15 +118,18 @@ func findNonUserPath(b *bundle.Bundle) string {
	if b.Config.Workspace.RootPath != "" && !containsName(b.Config.Workspace.RootPath) {
		return "root_path"
	}
	if b.Config.Workspace.StatePath != "" && !containsName(b.Config.Workspace.StatePath) {
		return "state_path"
	}
	if b.Config.Workspace.FilePath != "" && !containsName(b.Config.Workspace.FilePath) {
		return "file_path"
	}
	if b.Config.Workspace.ResourcePath != "" && !containsName(b.Config.Workspace.ResourcePath) {
		return "resource_path"
	}
	if b.Config.Workspace.ArtifactPath != "" && !containsName(b.Config.Workspace.ArtifactPath) {
		return "artifact_path"
	}
	if b.Config.Workspace.StatePath != "" && !containsName(b.Config.Workspace.StatePath) {
		return "state_path"
	}
	return ""
}

@@ -171,7 +174,7 @@ func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) diag.Di
		transformDevelopmentMode(ctx, b)
		return diags
	case config.Production:
		isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.UserName)
		isPrincipal := iamutil.IsServicePrincipal(b.Config.Workspace.CurrentUser.User)
		return validateProductionMode(ctx, b, isPrincipal)
	case "":
		// No action

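IsServicePrincipal undergoes the same signature change as GetShortUserName above: it now receives the full user object instead of a user name string. A minimal sketch (the UserName value is an assumed example):

    me := &iam.User{UserName: "1234abcd-5678-ef00-1234-56789abcdef0"}
    if iamutil.IsServicePrincipal(me) {
        // Production mode validation is relaxed for service principals.
    }
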
@@ -13,6 +13,7 @@ import (
	"github.com/databricks/cli/libs/tags"
	sdkconfig "github.com/databricks/databricks-sdk-go/config"
	"github.com/databricks/databricks-sdk-go/service/catalog"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/iam"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/databricks/databricks-sdk-go/service/ml"

@@ -122,6 +123,9 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
			Volumes: map[string]*resources.Volume{
				"volume1": {CreateVolumeRequestContent: &catalog.CreateVolumeRequestContent{Name: "volume1"}},
			},
			Clusters: map[string]*resources.Cluster{
				"cluster1": {ClusterSpec: &compute.ClusterSpec{ClusterName: "cluster1", SparkVersion: "13.2.x", NumWorkers: 1}},
			},
		},
	},
	// Use AWS implementation for testing.

@@ -180,6 +184,9 @@ func TestProcessTargetModeDevelopment(t *testing.T) {

	// Schema 1
	assert.Equal(t, "dev_lennart_schema1", b.Config.Resources.Schemas["schema1"].Name)

	// Clusters
	assert.Equal(t, "[dev lennart] cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)
}

func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) {

@@ -286,6 +293,7 @@ func TestProcessTargetModeDefault(t *testing.T) {
	assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
	assert.Equal(t, "schema1", b.Config.Resources.Schemas["schema1"].Name)
	assert.Equal(t, "volume1", b.Config.Resources.Volumes["volume1"].Name)
	assert.Equal(t, "cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)
}

func TestProcessTargetModeProduction(t *testing.T) {

@@ -317,6 +325,7 @@ func TestProcessTargetModeProduction(t *testing.T) {
	b.Config.Resources.Experiments["experiment2"].Permissions = permissions
	b.Config.Resources.Models["model1"].Permissions = permissions
	b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Permissions = permissions
	b.Config.Resources.Clusters["cluster1"].Permissions = permissions

	diags = validateProductionMode(context.Background(), b, false)
	require.NoError(t, diags.Error())

@@ -329,6 +338,7 @@ func TestProcessTargetModeProduction(t *testing.T) {
	assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName)
	assert.Equal(t, "schema1", b.Config.Resources.Schemas["schema1"].Name)
	assert.Equal(t, "volume1", b.Config.Resources.Volumes["volume1"].Name)
	assert.Equal(t, "cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName)
}

func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) {

@@ -108,7 +108,7 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
		return dyn.InvalidValue, fmt.Errorf("failed to create cache dir: %w", err)
	}

	rightRoot, diags := m.runPythonMutator(ctx, cacheDir, b.RootPath, pythonPath, leftRoot)
	rightRoot, diags := m.runPythonMutator(ctx, cacheDir, b.BundleRootPath, pythonPath, leftRoot)
	mutateDiags = diags
	if diags.HasError() {
		return dyn.InvalidValue, mutateDiagsHasError

@@ -45,15 +45,15 @@ func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc {
func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
		return dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (nv dyn.Value, err error) {
			v, err = dyn.Map(v, "paths", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
			v, err = dyn.Map(v, "paths", dyn.Foreach(m.makeRelativeTo(b.BundleRootPath)))
			if err != nil {
				return dyn.InvalidValue, err
			}
			v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
			v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.BundleRootPath)))
			if err != nil {
				return dyn.InvalidValue, err
			}
			v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.RootPath)))
			v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.BundleRootPath)))
			if err != nil {
				return dyn.InvalidValue, err
			}

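The dyn.Map/dyn.Foreach composition above applies one function to every element of a sequence nested under a key. A minimal sketch of the pattern, with a no-op callback standing in for makeRelativeTo:

    v, err := dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (dyn.Value, error) {
        return dyn.Map(v, "include", dyn.Foreach(func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
            return v, nil // no-op; the real callback rewrites the path here
        }))
    })
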
@@ -9,12 +9,13 @@ import (
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/mutator"
	"github.com/databricks/cli/bundle/internal/bundletest"
	"github.com/databricks/cli/libs/dyn"
	"github.com/stretchr/testify/assert"
)

func TestRewriteSyncPathsRelative(t *testing.T) {
	b := &bundle.Bundle{
		RootPath: ".",
		BundleRootPath: ".",
		Config: config.Root{
			Sync: config.Sync{
				Paths: []string{

@@ -33,12 +34,12 @@ func TestRewriteSyncPathsRelative(t *testing.T) {
		},
	}

	bundletest.SetLocation(b, "sync.paths[0]", "./databricks.yml")
	bundletest.SetLocation(b, "sync.paths[1]", "./databricks.yml")
	bundletest.SetLocation(b, "sync.include[0]", "./file.yml")
	bundletest.SetLocation(b, "sync.include[1]", "./a/file.yml")
	bundletest.SetLocation(b, "sync.exclude[0]", "./a/b/file.yml")
	bundletest.SetLocation(b, "sync.exclude[1]", "./a/b/c/file.yml")
	bundletest.SetLocation(b, "sync.paths[0]", []dyn.Location{{File: "./databricks.yml"}})
	bundletest.SetLocation(b, "sync.paths[1]", []dyn.Location{{File: "./databricks.yml"}})
	bundletest.SetLocation(b, "sync.include[0]", []dyn.Location{{File: "./file.yml"}})
	bundletest.SetLocation(b, "sync.include[1]", []dyn.Location{{File: "./a/file.yml"}})
	bundletest.SetLocation(b, "sync.exclude[0]", []dyn.Location{{File: "./a/b/file.yml"}})
	bundletest.SetLocation(b, "sync.exclude[1]", []dyn.Location{{File: "./a/b/c/file.yml"}})

	diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
	assert.NoError(t, diags.Error())

@@ -53,7 +54,7 @@ func TestRewriteSyncPathsRelative(t *testing.T) {

func TestRewriteSyncPathsAbsolute(t *testing.T) {
	b := &bundle.Bundle{
		RootPath: "/tmp/dir",
		BundleRootPath: "/tmp/dir",
		Config: config.Root{
			Sync: config.Sync{
				Paths: []string{

@@ -72,12 +73,12 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) {
		},
	}

	bundletest.SetLocation(b, "sync.paths[0]", "/tmp/dir/databricks.yml")
	bundletest.SetLocation(b, "sync.paths[1]", "/tmp/dir/databricks.yml")
	bundletest.SetLocation(b, "sync.include[0]", "/tmp/dir/file.yml")
	bundletest.SetLocation(b, "sync.include[1]", "/tmp/dir/a/file.yml")
	bundletest.SetLocation(b, "sync.exclude[0]", "/tmp/dir/a/b/file.yml")
	bundletest.SetLocation(b, "sync.exclude[1]", "/tmp/dir/a/b/c/file.yml")
	bundletest.SetLocation(b, "sync.paths[0]", []dyn.Location{{File: "/tmp/dir/databricks.yml"}})
	bundletest.SetLocation(b, "sync.paths[1]", []dyn.Location{{File: "/tmp/dir/databricks.yml"}})
	bundletest.SetLocation(b, "sync.include[0]", []dyn.Location{{File: "/tmp/dir/file.yml"}})
	bundletest.SetLocation(b, "sync.include[1]", []dyn.Location{{File: "/tmp/dir/a/file.yml"}})
	bundletest.SetLocation(b, "sync.exclude[0]", []dyn.Location{{File: "/tmp/dir/a/b/file.yml"}})
	bundletest.SetLocation(b, "sync.exclude[1]", []dyn.Location{{File: "/tmp/dir/a/b/c/file.yml"}})

	diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())
	assert.NoError(t, diags.Error())

@@ -93,7 +94,7 @@ func TestRewriteSyncPathsErrorPaths(t *testing.T) {
	t.Run("no sync block", func(t *testing.T) {
		b := &bundle.Bundle{
			RootPath: ".",
			BundleRootPath: ".",
		}

		diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths())

@@ -102,7 +103,7 @@ func TestRewriteSyncPathsErrorPaths(t *testing.T) {

	t.Run("empty include/exclude blocks", func(t *testing.T) {
		b := &bundle.Bundle{
			RootPath: ".",
			BundleRootPath: ".",
			Config: config.Root{
				Sync: config.Sync{
					Include: []string{},

@@ -0,0 +1,72 @@
package mutator

import (
	"context"
	"fmt"
	"strings"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
)

type rewriteWorkspacePrefix struct{}

// RewriteWorkspacePrefix finds any strings in the bundle configuration that
// combine the /Workspace prefix with a workspace path variable and removes
// the redundant prefix.
func RewriteWorkspacePrefix() bundle.Mutator {
	return &rewriteWorkspacePrefix{}
}

func (m *rewriteWorkspacePrefix) Name() string {
	return "RewriteWorkspacePrefix"
}

func (m *rewriteWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	diags := diag.Diagnostics{}
	paths := map[string]string{
		"/Workspace/${workspace.root_path}":     "${workspace.root_path}",
		"/Workspace${workspace.root_path}":      "${workspace.root_path}",
		"/Workspace/${workspace.file_path}":     "${workspace.file_path}",
		"/Workspace${workspace.file_path}":      "${workspace.file_path}",
		"/Workspace/${workspace.artifact_path}": "${workspace.artifact_path}",
		"/Workspace${workspace.artifact_path}":  "${workspace.artifact_path}",
		"/Workspace/${workspace.state_path}":    "${workspace.state_path}",
		"/Workspace${workspace.state_path}":     "${workspace.state_path}",
	}

	err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) {
		// Walk through the bundle configuration, check all the string leafs and
		// see if any of the prefixes are used in the remote path.
		return dyn.Walk(root, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
			vv, ok := v.AsString()
			if !ok {
				return v, nil
			}

			for path, replacePath := range paths {
				if strings.Contains(vv, path) {
					newPath := strings.Replace(vv, path, replacePath, 1)
					diags = append(diags, diag.Diagnostic{
						Severity:  diag.Warning,
						Summary:   fmt.Sprintf("substring %q found in %q. Please update this to %q.", path, vv, newPath),
						Detail:    "For more information, please refer to: https://docs.databricks.com/en/release-notes/dev-tools/bundles.html#workspace-paths",
						Locations: v.Locations(),
						Paths:     []dyn.Path{p},
					})

					// Remove the workspace prefix from the string.
					return dyn.NewValue(newPath, v.Locations()), nil
				}
			}

			return v, nil
		})
	})

	if err != nil {
		return diag.FromErr(err)
	}

	return diags
}

@@ -0,0 +1,85 @@
package mutator

import (
	"context"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/stretchr/testify/require"
)

func TestNoWorkspacePrefixUsed(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Workspace: config.Workspace{
				RootPath:     "/Workspace/Users/test",
				ArtifactPath: "/Workspace/Users/test/artifacts",
				FilePath:     "/Workspace/Users/test/files",
				StatePath:    "/Workspace/Users/test/state",
			},

			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"test_job": {
						JobSettings: &jobs.JobSettings{
							Tasks: []jobs.Task{
								{
									SparkPythonTask: &jobs.SparkPythonTask{
										PythonFile: "/Workspace/${workspace.root_path}/file1.py",
									},
								},
								{
									NotebookTask: &jobs.NotebookTask{
										NotebookPath: "/Workspace${workspace.file_path}/notebook1",
									},
									Libraries: []compute.Library{
										{
											Jar: "/Workspace/${workspace.artifact_path}/jar1.jar",
										},
									},
								},
								{
									NotebookTask: &jobs.NotebookTask{
										NotebookPath: "${workspace.file_path}/notebook2",
									},
									Libraries: []compute.Library{
										{
											Jar: "${workspace.artifact_path}/jar2.jar",
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}

	diags := bundle.Apply(context.Background(), b, RewriteWorkspacePrefix())
	require.Len(t, diags, 3)

	expectedErrors := map[string]bool{
		`substring "/Workspace/${workspace.root_path}" found in "/Workspace/${workspace.root_path}/file1.py". Please update this to "${workspace.root_path}/file1.py".`:            true,
		`substring "/Workspace${workspace.file_path}" found in "/Workspace${workspace.file_path}/notebook1". Please update this to "${workspace.file_path}/notebook1".`:             true,
		`substring "/Workspace/${workspace.artifact_path}" found in "/Workspace/${workspace.artifact_path}/jar1.jar". Please update this to "${workspace.artifact_path}/jar1.jar".`: true,
	}

	for _, d := range diags {
		require.Equal(t, d.Severity, diag.Warning)
		require.Contains(t, expectedErrors, d.Summary)
		delete(expectedErrors, d.Summary)
	}

	require.Equal(t, "${workspace.root_path}/file1.py", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[0].SparkPythonTask.PythonFile)
	require.Equal(t, "${workspace.file_path}/notebook1", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[1].NotebookTask.NotebookPath)
	require.Equal(t, "${workspace.artifact_path}/jar1.jar", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[1].Libraries[0].Jar)
	require.Equal(t, "${workspace.file_path}/notebook2", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[2].NotebookTask.NotebookPath)
	require.Equal(t, "${workspace.artifact_path}/jar2.jar", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[2].Libraries[0].Jar)
}

@@ -30,50 +30,44 @@ func (m *setRunAs) Name() string {
	return "SetRunAs"
}

type errUnsupportedResourceTypeForRunAs struct {
	resourceType     string
	resourceLocation dyn.Location
	currentUser      string
	runAsUser        string
func reportRunAsNotSupported(resourceType string, location dyn.Location, currentUser string, runAsUser string) diag.Diagnostics {
	return diag.Diagnostics{{
		Summary: fmt.Sprintf("%s do not support setting a run_as user that is different from the owner.\n"+
			"Current identity: %s. Run as identity: %s.\n"+
			"See https://docs.databricks.com/dev-tools/bundles/run-as.html to learn more about the run_as property.", resourceType, currentUser, runAsUser),
		Locations: []dyn.Location{location},
		Severity:  diag.Error,
	}}
}

func (e errUnsupportedResourceTypeForRunAs) Error() string {
	return fmt.Sprintf("%s are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Please refer to the documentation at https://docs.databricks.com/dev-tools/bundles/run-as.html for more details. Location of the unsupported resource: %s. Current identity: %s. Run as identity: %s", e.resourceType, e.resourceLocation, e.currentUser, e.runAsUser)
}
func validateRunAs(b *bundle.Bundle) diag.Diagnostics {
	diags := diag.Diagnostics{}

type errBothSpAndUserSpecified struct {
	spName   string
	spLoc    dyn.Location
	userName string
	userLoc  dyn.Location
}
	neitherSpecifiedErr := diag.Diagnostics{{
		Summary:   "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified",
		Locations: []dyn.Location{b.Config.GetLocation("run_as")},
		Severity:  diag.Error,
	}}

func (e errBothSpAndUserSpecified) Error() string {
	return fmt.Sprintf("run_as section must specify exactly one identity. A service_principal_name %q is specified at %s. A user_name %q is defined at %s", e.spName, e.spLoc, e.userName, e.userLoc)
}

func validateRunAs(b *bundle.Bundle) error {
	neitherSpecifiedErr := fmt.Errorf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s", b.Config.GetLocation("run_as"))
	// Error if neither service_principal_name nor user_name are specified, but the
	// Fail fast if neither service_principal_name nor user_name are specified, but the
	// run_as section is present.
	if b.Config.Value().Get("run_as").Kind() == dyn.KindNil {
		return neitherSpecifiedErr
	}
	// Error if one or both of service_principal_name and user_name are specified,

	// Fail fast if one or both of service_principal_name and user_name are specified,
	// but with empty values.
	if b.Config.RunAs.ServicePrincipalName == "" && b.Config.RunAs.UserName == "" {
	runAs := b.Config.RunAs
	if runAs.ServicePrincipalName == "" && runAs.UserName == "" {
		return neitherSpecifiedErr
	}

	// Error if both service_principal_name and user_name are specified
	runAs := b.Config.RunAs
	if runAs.UserName != "" && runAs.ServicePrincipalName != "" {
		return errBothSpAndUserSpecified{
			spName:   runAs.ServicePrincipalName,
			userName: runAs.UserName,
			spLoc:    b.Config.GetLocation("run_as.service_principal_name"),
			userLoc:  b.Config.GetLocation("run_as.user_name"),
		}
		diags = diags.Extend(diag.Diagnostics{{
			Summary:   "run_as section cannot specify both user_name and service_principal_name",
			Locations: []dyn.Location{b.Config.GetLocation("run_as")},
			Severity:  diag.Error,
		}})
	}

	identity := runAs.ServicePrincipalName

@@ -83,40 +77,40 @@ func validateRunAs(b *bundle.Bundle) error {

	// All resources are supported if the run_as identity is the same as the current deployment identity.
	if identity == b.Config.Workspace.CurrentUser.UserName {
		return nil
		return diags
	}

	// DLT pipelines do not support run_as in the API.
	if len(b.Config.Resources.Pipelines) > 0 {
		return errUnsupportedResourceTypeForRunAs{
			resourceType:     "pipelines",
			resourceLocation: b.Config.GetLocation("resources.pipelines"),
			currentUser:      b.Config.Workspace.CurrentUser.UserName,
			runAsUser:        identity,
		}
		diags = diags.Extend(reportRunAsNotSupported(
			"pipelines",
			b.Config.GetLocation("resources.pipelines"),
			b.Config.Workspace.CurrentUser.UserName,
			identity,
		))
	}

	// Model serving endpoints do not support run_as in the API.
	if len(b.Config.Resources.ModelServingEndpoints) > 0 {
		return errUnsupportedResourceTypeForRunAs{
			resourceType:     "model_serving_endpoints",
			resourceLocation: b.Config.GetLocation("resources.model_serving_endpoints"),
			currentUser:      b.Config.Workspace.CurrentUser.UserName,
			runAsUser:        identity,
		}
		diags = diags.Extend(reportRunAsNotSupported(
			"model_serving_endpoints",
			b.Config.GetLocation("resources.model_serving_endpoints"),
			b.Config.Workspace.CurrentUser.UserName,
			identity,
		))
	}

	// Monitors do not support run_as in the API.
	if len(b.Config.Resources.QualityMonitors) > 0 {
		return errUnsupportedResourceTypeForRunAs{
			resourceType:     "quality_monitors",
			resourceLocation: b.Config.GetLocation("resources.quality_monitors"),
			currentUser:      b.Config.Workspace.CurrentUser.UserName,
			runAsUser:        identity,
		}
		diags = diags.Extend(reportRunAsNotSupported(
			"quality_monitors",
			b.Config.GetLocation("resources.quality_monitors"),
			b.Config.Workspace.CurrentUser.UserName,
			identity,
		))
	}

	return nil
	return diags
}

func setRunAsForJobs(b *bundle.Bundle) {

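The refactor switches validateRunAs from fail-fast errors to accumulated diagnostics, so every unsupported resource type is reported in a single pass instead of only the first one. A minimal sketch of the pattern (checkPipelines and checkEndpoints are hypothetical stand-ins for the checks above):

    func validate(b *bundle.Bundle) diag.Diagnostics {
        diags := diag.Diagnostics{}
        diags = diags.Extend(checkPipelines(b)) // each check returns diag.Diagnostics
        diags = diags.Extend(checkEndpoints(b))
        return diags // the caller inspects diags.HasError()
    }
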
@@ -187,8 +181,9 @@ func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
	}

	// Assert the run_as configuration is valid in the context of the bundle
	if err := validateRunAs(b); err != nil {
		return diag.FromErr(err)
	diags := validateRunAs(b)
	if diags.HasError() {
		return diags
	}

	setRunAsForJobs(b)

@@ -32,6 +32,7 @@ func allResourceTypes(t *testing.T) []string {
	// the dyn library gives us the correct list of all resources supported. Please
	// also update this check when adding a new resource.
	require.Equal(t, []string{
		"clusters",
		"experiments",
		"jobs",
		"model_serving_endpoints",

@@ -134,6 +135,7 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) {
	// some point in the future. These resources are (implicitly) on the deny list, since
	// they are not on the allow list below.
	allowList := []string{
		"clusters",
		"jobs",
		"models",
		"registered_models",

@@ -188,11 +190,8 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) {
			Config: *r,
		}
		diags := bundle.Apply(context.Background(), b, SetRunAs())
		assert.Equal(t, diags.Error().Error(), errUnsupportedResourceTypeForRunAs{
			resourceType:     rt,
			resourceLocation: dyn.Location{},
			currentUser:      "alice",
			runAsUser:        "bob",
		}.Error(), "expected run_as with a different identity than the current deployment user to not be supported for resources of type: %s", rt)
		assert.Contains(t, diags.Error().Error(), "do not support setting a run_as user that is different from the owner.\n"+
			"Current identity: alice. Run as identity: bob.\n"+
			"See https://docs.databricks.com/dev-tools/bundles/run-as.html to learn more about the run_as property.", rt)
	}
}

@@ -15,7 +15,7 @@ import (

func TestSyncDefaultPath_DefaultIfUnset(t *testing.T) {
	b := &bundle.Bundle{
		RootPath: "/tmp/some/dir",
		BundleRootPath: "/tmp/some/dir",
		Config: config.Root{},
	}

@@ -51,7 +51,7 @@ func TestSyncDefaultPath_SkipIfSet(t *testing.T) {
	for _, tcase := range tcases {
		t.Run(tcase.name, func(t *testing.T) {
			b := &bundle.Bundle{
				RootPath: "/tmp/some/dir",
				BundleRootPath: "/tmp/some/dir",
				Config: config.Root{},
			}

@@ -57,7 +57,7 @@ func (m *syncInferRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno
	var diags diag.Diagnostics

	// Use the bundle root path as the starting point for inferring the sync root path.
	bundleRootPath := filepath.Clean(b.RootPath)
	bundleRootPath := filepath.Clean(b.BundleRootPath)

	// Infer the sync root path by looking at each one of the sync paths.
	// Every sync path must be a descendant of the final sync root path.

@@ -9,13 +9,14 @@ import (
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/mutator"
	"github.com/databricks/cli/bundle/internal/bundletest"
	"github.com/databricks/cli/libs/dyn"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestSyncInferRoot_NominalAbsolute(t *testing.T) {
	b := &bundle.Bundle{
		RootPath: "/tmp/some/dir",
		BundleRootPath: "/tmp/some/dir",
		Config: config.Root{
			Sync: config.Sync{
				Paths: []string{

@@ -46,7 +47,7 @@ func TestSyncInferRoot_NominalAbsolute(t *testing.T) {

func TestSyncInferRoot_NominalRelative(t *testing.T) {
	b := &bundle.Bundle{
		RootPath: "./some/dir",
		BundleRootPath: "./some/dir",
		Config: config.Root{
			Sync: config.Sync{
				Paths: []string{

@@ -77,7 +78,7 @@ func TestSyncInferRoot_NominalRelative(t *testing.T) {

func TestSyncInferRoot_ParentDirectory(t *testing.T) {
	b := &bundle.Bundle{
		RootPath: "/tmp/some/dir",
		BundleRootPath: "/tmp/some/dir",
		Config: config.Root{
			Sync: config.Sync{
				Paths: []string{

@@ -108,7 +109,7 @@ func TestSyncInferRoot_ParentDirectory(t *testing.T) {

func TestSyncInferRoot_ManyParentDirectories(t *testing.T) {
	b := &bundle.Bundle{
		RootPath: "/tmp/some/dir/that/is/very/deeply/nested",
		BundleRootPath: "/tmp/some/dir/that/is/very/deeply/nested",
		Config: config.Root{
			Sync: config.Sync{
				Paths: []string{

@@ -145,7 +146,7 @@ func TestSyncInferRoot_ManyParentDirectories(t *testing.T) {

func TestSyncInferRoot_MultiplePaths(t *testing.T) {
	b := &bundle.Bundle{
		RootPath: "/tmp/some/bundle/root",
		BundleRootPath: "/tmp/some/bundle/root",
		Config: config.Root{
			Sync: config.Sync{
				Paths: []string{

@@ -172,7 +173,7 @@ func TestSyncInferRoot_MultiplePaths(t *testing.T) {

func TestSyncInferRoot_Error(t *testing.T) {
	b := &bundle.Bundle{
		RootPath: "/tmp/some/dir",
		BundleRootPath: "/tmp/some/dir",
		Config: config.Root{
			Sync: config.Sync{
				Paths: []string{

@@ -184,7 +185,7 @@ func TestSyncInferRoot_Error(t *testing.T) {
		},
	}

	bundletest.SetLocation(b, "sync.paths", "databricks.yml")
	bundletest.SetLocation(b, "sync.paths", []dyn.Location{{File: "databricks.yml"}})

	ctx := context.Background()
	diags := bundle.Apply(ctx, b, mutator.SyncInferRoot())

@@ -4,97 +4,11 @@ import (
	"fmt"
	"slices"

	"github.com/databricks/cli/bundle/libraries"
	"github.com/databricks/cli/bundle/config/mutator/paths"

	"github.com/databricks/cli/libs/dyn"
)

type jobRewritePattern struct {
	pattern     dyn.Pattern
	fn          rewriteFunc
	skipRewrite func(string) bool
}

func noSkipRewrite(string) bool {
	return false
}

func rewritePatterns(t *translateContext, base dyn.Pattern) []jobRewritePattern {
	return []jobRewritePattern{
		{
			base.Append(dyn.Key("notebook_task"), dyn.Key("notebook_path")),
			t.translateNotebookPath,
			noSkipRewrite,
		},
		{
			base.Append(dyn.Key("spark_python_task"), dyn.Key("python_file")),
			t.translateFilePath,
			noSkipRewrite,
		},
		{
			base.Append(dyn.Key("dbt_task"), dyn.Key("project_directory")),
			t.translateDirectoryPath,
			noSkipRewrite,
		},
		{
			base.Append(dyn.Key("sql_task"), dyn.Key("file"), dyn.Key("path")),
			t.translateFilePath,
			noSkipRewrite,
		},
		{
			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("whl")),
			t.translateNoOp,
			noSkipRewrite,
		},
		{
			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("jar")),
			t.translateNoOp,
			noSkipRewrite,
		},
		{
			base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("requirements")),
			t.translateFilePath,
			noSkipRewrite,
		},
	}
}

func (t *translateContext) jobRewritePatterns() []jobRewritePattern {
	// Base pattern to match all tasks in all jobs.
	base := dyn.NewPattern(
		dyn.Key("resources"),
		dyn.Key("jobs"),
		dyn.AnyKey(),
		dyn.Key("tasks"),
		dyn.AnyIndex(),
	)

	// Compile list of patterns and their respective rewrite functions.
	jobEnvironmentsPatterns := []jobRewritePattern{
		{
			dyn.NewPattern(
				dyn.Key("resources"),
				dyn.Key("jobs"),
				dyn.AnyKey(),
				dyn.Key("environments"),
				dyn.AnyIndex(),
				dyn.Key("spec"),
				dyn.Key("dependencies"),
				dyn.AnyIndex(),
			),
			t.translateNoOpWithPrefix,
			func(s string) bool {
				return !libraries.IsLibraryLocal(s)
			},
		},
	}

	taskPatterns := rewritePatterns(t, base)
	forEachPatterns := rewritePatterns(t, base.Append(dyn.Key("for_each_task"), dyn.Key("task")))
	allPatterns := append(taskPatterns, jobEnvironmentsPatterns...)
	allPatterns = append(allPatterns, forEachPatterns...)
	return allPatterns
}

func (t *translateContext) applyJobTranslations(v dyn.Value) (dyn.Value, error) {
	var err error

@@ -111,8 +25,7 @@ func (t *translateContext) applyJobTranslations(v dyn.Value) (dyn.Value, error)
		}
	}

	for _, rewritePattern := range t.jobRewritePatterns() {
		v, err = dyn.MapByPattern(v, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
	return paths.VisitJobPaths(v, func(p dyn.Path, kind paths.PathKind, v dyn.Value) (dyn.Value, error) {
		key := p[2].Key()

		// Skip path translation if the job is using git source.

@@ -125,16 +38,28 @@ func (t *translateContext) applyJobTranslations(v dyn.Value) (dyn.Value, error)
			return dyn.InvalidValue, fmt.Errorf("unable to determine directory for job %s: %w", key, err)
		}

		sv := v.MustString()
		if rewritePattern.skipRewrite(sv) {
			return v, nil
		}
		return t.rewriteRelativeTo(p, v, rewritePattern.fn, dir, fallback[key])
	})
		rewritePatternFn, err := t.getRewritePatternFn(kind)
		if err != nil {
			return dyn.InvalidValue, err
		}

		return t.rewriteRelativeTo(p, v, rewritePatternFn, dir, fallback[key])
	})
}

	return v, nil
func (t *translateContext) getRewritePatternFn(kind paths.PathKind) (rewriteFunc, error) {
	switch kind {
	case paths.PathKindLibrary:
		return t.translateNoOp, nil
	case paths.PathKindNotebook:
		return t.translateNotebookPath, nil
	case paths.PathKindWorkspaceFile:
		return t.translateFilePath, nil
	case paths.PathKindDirectory:
		return t.translateDirectoryPath, nil
	case paths.PathKindWithPrefix:
		return t.translateNoOpWithPrefix, nil
	}

	return nil, fmt.Errorf("unsupported path kind: %d", kind)
}

@@ -82,7 +82,7 @@ func TestTranslatePathsSkippedWithGitSource(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	require.NoError(t, diags.Error())

@@ -210,7 +210,7 @@ func TestTranslatePaths(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	require.NoError(t, diags.Error())

@@ -346,8 +346,8 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml"))
-	bundletest.SetLocation(b, "resources.pipelines", filepath.Join(dir, "pipeline/resource.yml"))
+	bundletest.SetLocation(b, "resources.jobs", []dyn.Location{{File: filepath.Join(dir, "job/resource.yml")}})
+	bundletest.SetLocation(b, "resources.pipelines", []dyn.Location{{File: filepath.Join(dir, "pipeline/resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	require.NoError(t, diags.Error())

@@ -408,7 +408,7 @@ func TestTranslatePathsOutsideSyncRoot(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "../resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "../resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.ErrorContains(t, diags.Error(), "is not contained in sync root path")

@@ -439,7 +439,7 @@ func TestJobNotebookDoesNotExistError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found")

@@ -470,7 +470,7 @@ func TestJobFileDoesNotExistError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.EqualError(t, diags.Error(), "file ./doesnt_exist.py not found")

@@ -501,7 +501,7 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found")

@@ -532,7 +532,7 @@ func TestPipelineFileDoesNotExistError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.EqualError(t, diags.Error(), "file ./doesnt_exist.py not found")

@@ -567,7 +567,7 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.ErrorContains(t, diags.Error(), `expected a file for "resources.jobs.job.tasks[0].spark_python_task.python_file" but got a notebook`)

@@ -602,7 +602,7 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.ErrorContains(t, diags.Error(), `expected a notebook for "resources.jobs.job.tasks[0].notebook_task.notebook_path" but got a file`)

@@ -637,7 +637,7 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.ErrorContains(t, diags.Error(), `expected a notebook for "resources.pipelines.pipeline.libraries[0].notebook.path" but got a file`)

@@ -672,7 +672,7 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml"))
+	bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	assert.ErrorContains(t, diags.Error(), `expected a file for "resources.pipelines.pipeline.libraries[0].file.path" but got a notebook`)

@@ -710,7 +710,7 @@ func TestTranslatePathJobEnvironments(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml"))
+	bundletest.SetLocation(b, "resources.jobs", []dyn.Location{{File: filepath.Join(dir, "job/resource.yml")}})

 	diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths())
 	require.NoError(t, diags.Error())

@@ -753,8 +753,8 @@ func TestTranslatePathWithComplexVariables(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "variables", filepath.Join(dir, "variables/variables.yml"))
-	bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml"))
+	bundletest.SetLocation(b, "variables", []dyn.Location{{File: filepath.Join(dir, "variables/variables.yml")}})
+	bundletest.SetLocation(b, "resources.jobs", []dyn.Location{{File: filepath.Join(dir, "job/resource.yml")}})

 	ctx := context.Background()
 	// Assign the variables to the dynamic configuration.
@@ -20,6 +20,7 @@ type Resources struct {
 	QualityMonitors map[string]*resources.QualityMonitor `json:"quality_monitors,omitempty"`
 	Schemas         map[string]*resources.Schema         `json:"schemas,omitempty"`
 	Volumes         map[string]*resources.Volume         `json:"volumes,omitempty"`
+	Clusters        map[string]*resources.Cluster        `json:"clusters,omitempty"`
 }

 type ConfigResource interface {
@@ -59,3 +60,22 @@ func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error)

 	return found[0], nil
 }
+
+type ResourceDescription struct {
+	SingularName string
+}
+
+// The keys of the map correspond to the resource keys in the bundle configuration.
+func SupportedResources() map[string]ResourceDescription {
+	return map[string]ResourceDescription{
+		"jobs":                    {SingularName: "job"},
+		"pipelines":               {SingularName: "pipeline"},
+		"models":                  {SingularName: "model"},
+		"experiments":             {SingularName: "experiment"},
+		"model_serving_endpoints": {SingularName: "model_serving_endpoint"},
+		"registered_models":       {SingularName: "registered_model"},
+		"quality_monitors":        {SingularName: "quality_monitor"},
+		"schemas":                 {SingularName: "schema"},
+		"clusters":                {SingularName: "cluster"},
+	}
+}
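For orientation, a minimal sketch (not part of this diff) of how SupportedResources() might be consumed, for example to print the supported resource types; the sorting and output format here are illustrative assumptions:

package main

import (
	"fmt"
	"sort"

	"github.com/databricks/cli/bundle/config"
)

func main() {
	// Collect and sort the supported resource keys for stable output.
	supported := config.SupportedResources()
	keys := make([]string, 0, len(supported))
	for k := range supported {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		fmt.Printf("%s (singular: %s)\n", k, supported[k].SingularName)
	}
}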
@@ -0,0 +1,39 @@
+package resources
+
+import (
+	"context"
+
+	"github.com/databricks/cli/libs/log"
+	"github.com/databricks/databricks-sdk-go"
+	"github.com/databricks/databricks-sdk-go/marshal"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+)
+
+type Cluster struct {
+	ID             string         `json:"id,omitempty" bundle:"readonly"`
+	Permissions    []Permission   `json:"permissions,omitempty"`
+	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
+
+	*compute.ClusterSpec
+}
+
+func (s *Cluster) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s Cluster) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+func (s *Cluster) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
+	_, err := w.Clusters.GetByClusterId(ctx, id)
+	if err != nil {
+		log.Debugf(ctx, "cluster %s does not exist", id)
+		return false, err
+	}
+	return true, nil
+}
+
+func (s *Cluster) TerraformResourceName() string {
+	return "databricks_cluster"
+}
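One detail worth flagging: Exists treats any lookup error as non-existence, returning false together with the error. A minimal caller sketch (the workspace client w and the cluster ID are assumptions for illustration):

exists, err := cluster.Exists(ctx, w, "0123-456789-abcdefgh") // hypothetical ID
if err != nil {
	// The lookup failed; per the implementation above this also covers the
	// "cluster does not exist" case, which is logged at debug level.
}
_ = exists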
@@ -3,6 +3,7 @@ package config

 import (
 	"encoding/json"
 	"reflect"
 	"strings"
 	"testing"

 	"github.com/stretchr/testify/assert"

@@ -61,3 +62,18 @@ func TestCustomMarshallerIsImplemented(t *testing.T) {
 		}, "Resource %s does not have a custom unmarshaller", field.Name)
 	}
 }
+
+func TestSupportedResources(t *testing.T) {
+	expected := map[string]ResourceDescription{}
+	typ := reflect.TypeOf(Resources{})
+	for i := 0; i < typ.NumField(); i++ {
+		field := typ.Field(i)
+		jsonTags := strings.Split(field.Tag.Get("json"), ",")
+		singularName := strings.TrimSuffix(jsonTags[0], "s")
+		expected[jsonTags[0]] = ResourceDescription{SingularName: singularName}
+	}
+
+	// Please add your resource to the SupportedResources() function in resources.go
+	// if you are adding a new resource.
+	assert.Equal(t, expected, SupportedResources())
+}
@@ -366,9 +366,9 @@ func (r *Root) MergeTargetOverrides(name string) error {
 		}
 	}

-	// Merge `compute_id`. This field must be overwritten if set, not merged.
-	if v := target.Get("compute_id"); v.Kind() != dyn.KindInvalid {
-		root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("compute_id")), v)
+	// Merge `cluster_id`. This field must be overwritten if set, not merged.
+	if v := target.Get("cluster_id"); v.Kind() != dyn.KindInvalid {
+		root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("cluster_id")), v)
 		if err != nil {
 			return err
 		}

@@ -406,23 +406,45 @@ func (r *Root) MergeTargetOverrides(name string) error {
 	return r.updateWithDynamicValue(root)
 }

-var variableKeywords = []string{"default", "lookup"}
+var allowedVariableDefinitions = []([]string){
+	{"default", "type", "description"},
+	{"default", "type"},
+	{"default", "description"},
+	{"lookup", "description"},
+	{"default"},
+	{"lookup"},
+}

 // isFullVariableOverrideDef checks if the given value is a full syntax variable override.
-// A full syntax variable override is a map with only one of the following
-// keys: "default", "lookup".
+// A full syntax variable override is a map with one, two, or three keys that
+// match one of the combinations in allowedVariableDefinitions, e.g. {"default"},
+// {"lookup"}, or {"default", "type", "description"}.
 func isFullVariableOverrideDef(v dyn.Value) bool {
 	mv, ok := v.AsMap()
 	if !ok {
 		return false
 	}

-	if mv.Len() != 1 {
+	// If the map has more than 3 keys, it is not a full variable override.
+	if mv.Len() > 3 {
 		return false
 	}

-	for _, keyword := range variableKeywords {
-		if _, ok := mv.GetByString(keyword); ok {
+	for _, keys := range allowedVariableDefinitions {
+		if len(keys) != mv.Len() {
+			continue
+		}
+
+		// Check if the keys are the same.
+		match := true
+		for _, key := range keys {
+			if _, ok := mv.GetByString(key); !ok {
+				match = false
+				break
+			}
+		}
+
+		if match {
+			return true
+		}
+	}
@@ -6,6 +6,7 @@ import (
 	"testing"

 	"github.com/databricks/cli/bundle/config/variable"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )

@@ -169,3 +170,87 @@ func TestRootMergeTargetOverridesWithVariables(t *testing.T) {
 	assert.Equal(t, "complex var", root.Variables["complex"].Description)
 }
+
+func TestIsFullVariableOverrideDef(t *testing.T) {
+	testCases := []struct {
+		value    dyn.Value
+		expected bool
+	}{
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type":        dyn.V("string"),
+				"default":     dyn.V("foo"),
+				"description": dyn.V("foo var"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type":        dyn.V("string"),
+				"lookup":      dyn.V("foo"),
+				"description": dyn.V("foo var"),
+			}),
+			expected: false,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type":    dyn.V("string"),
+				"default": dyn.V("foo"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type":   dyn.V("string"),
+				"lookup": dyn.V("foo"),
+			}),
+			expected: false,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"description": dyn.V("string"),
+				"default":     dyn.V("foo"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"description": dyn.V("string"),
+				"lookup":      dyn.V("foo"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"default": dyn.V("foo"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"lookup": dyn.V("foo"),
+			}),
+			expected: true,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type": dyn.V("string"),
+			}),
+			expected: false,
+		},
+		{
+			value: dyn.V(map[string]dyn.Value{
+				"type":        dyn.V("string"),
+				"default":     dyn.V("foo"),
+				"description": dyn.V("foo var"),
+				"lookup":      dyn.V("foo"),
+			}),
+			expected: false,
+		},
+	}
+
+	for i, tc := range testCases {
+		assert.Equal(t, tc.expected, isFullVariableOverrideDef(tc.value), "test case %d", i)
+	}
+}
@@ -24,8 +24,11 @@ type Target struct {
 	// name prefix of deployed resources.
 	Presets Presets `json:"presets,omitempty"`

-	// Overrides the compute used for jobs and other supported assets.
-	ComputeID string `json:"compute_id,omitempty"`
+	// DEPRECATED: Overrides the compute used for jobs and other supported assets.
+	ComputeId string `json:"compute_id,omitempty"`
+
+	// Overrides the cluster used for jobs and other supported assets.
+	ClusterId string `json:"cluster_id,omitempty"`

 	Bundle *Bundle `json:"bundle,omitempty"`
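Since compute_id is kept for backwards compatibility, a caller-side fallback is one plausible way to honor both fields. A hedged sketch (resolveClusterId is hypothetical; the diff does not show where the deprecation is actually resolved):

// resolveClusterId prefers the new cluster_id and falls back to the
// deprecated compute_id for older configurations (illustrative only).
func resolveClusterId(t *config.Target) string {
	if t.ClusterId != "" {
		return t.ClusterId
	}
	return t.ComputeId
}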
@@ -0,0 +1,161 @@
+package validate
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+)
+
+// JobTaskClusterSpec validates that job tasks have a cluster spec defined
+// if the task requires a cluster.
+func JobTaskClusterSpec() bundle.ReadOnlyMutator {
+	return &jobTaskClusterSpec{}
+}
+
+type jobTaskClusterSpec struct {
+}
+
+func (v *jobTaskClusterSpec) Name() string {
+	return "validate:job_task_cluster_spec"
+}
+
+func (v *jobTaskClusterSpec) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
+	diags := diag.Diagnostics{}
+
+	jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs"))
+
+	for resourceName, job := range rb.Config().Resources.Jobs {
+		resourcePath := jobsPath.Append(dyn.Key(resourceName))
+
+		for taskIndex, task := range job.Tasks {
+			taskPath := resourcePath.Append(dyn.Key("tasks"), dyn.Index(taskIndex))
+
+			diags = diags.Extend(validateJobTask(rb, task, taskPath))
+		}
+	}
+
+	return diags
+}
+
+func validateJobTask(rb bundle.ReadOnlyBundle, task jobs.Task, taskPath dyn.Path) diag.Diagnostics {
+	diags := diag.Diagnostics{}
+
+	var specified []string
+	var unspecified []string
+
+	if task.JobClusterKey != "" {
+		specified = append(specified, "job_cluster_key")
+	} else {
+		unspecified = append(unspecified, "job_cluster_key")
+	}
+
+	if task.EnvironmentKey != "" {
+		specified = append(specified, "environment_key")
+	} else {
+		unspecified = append(unspecified, "environment_key")
+	}
+
+	if task.ExistingClusterId != "" {
+		specified = append(specified, "existing_cluster_id")
+	} else {
+		unspecified = append(unspecified, "existing_cluster_id")
+	}
+
+	if task.NewCluster != nil {
+		specified = append(specified, "new_cluster")
+	} else {
+		unspecified = append(unspecified, "new_cluster")
+	}
+
+	if task.ForEachTask != nil {
+		forEachTaskPath := taskPath.Append(dyn.Key("for_each_task"), dyn.Key("task"))
+
+		diags = diags.Extend(validateJobTask(rb, task.ForEachTask.Task, forEachTaskPath))
+	}
+
+	if isComputeTask(task) && len(specified) == 0 {
+		if task.NotebookTask != nil {
+			// Notebook tasks without a cluster spec will use the notebook environment.
+		} else {
+			// The path alone might not be helpful, so adding the user-specified task key clarifies the context.
+			detail := fmt.Sprintf(
+				"Task %q requires a cluster or an environment to run.\nSpecify one of the following fields: %s.",
+				task.TaskKey,
+				strings.Join(unspecified, ", "),
+			)
+
+			diags = diags.Append(diag.Diagnostic{
+				Severity:  diag.Error,
+				Summary:   "Missing required cluster or environment settings",
+				Detail:    detail,
+				Locations: rb.Config().GetLocations(taskPath.String()),
+				Paths:     []dyn.Path{taskPath},
+			})
+		}
+	}
+
+	return diags
+}
+
+// isComputeTask returns true if the task runs on a cluster or serverless GC.
+func isComputeTask(task jobs.Task) bool {
+	if task.NotebookTask != nil {
+		// If warehouse_id is set, it's a SQL notebook that doesn't need a cluster or serverless GC.
+		if task.NotebookTask.WarehouseId != "" {
+			return false
+		} else {
+			// Task settings don't require specifying a cluster/serverless GC, but the task itself can run on one.
+			// We handle that case separately in validateJobTask.
+			return true
+		}
+	}
+
+	if task.PythonWheelTask != nil {
+		return true
+	}
+
+	if task.DbtTask != nil {
+		return true
+	}
+
+	if task.SparkJarTask != nil {
+		return true
+	}
+
+	if task.SparkSubmitTask != nil {
+		return true
+	}
+
+	if task.SparkPythonTask != nil {
+		return true
+	}
+
+	if task.SqlTask != nil {
+		return false
+	}
+
+	if task.PipelineTask != nil {
+		// While pipelines use clusters, pipeline tasks don't; they only trigger pipelines.
+		return false
+	}
+
+	if task.RunJobTask != nil {
+		return false
+	}
+
+	if task.ConditionTask != nil {
+		return false
+	}
+
+	// A for_each task doesn't use clusters itself; its underlying task(s) can, though.
+	if task.ForEachTask != nil {
+		return false
+	}
+
+	return false
+}
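As the test file below confirms, the validator runs as a read-only mutator; invoking it standalone looks like this (ctx and b are assumed to be an existing context and bundle):

diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), JobTaskClusterSpec())
for _, d := range diags {
	// Each diagnostic carries a summary, a detail message, and the offending task path.
	fmt.Println(d.Summary)
}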
@@ -0,0 +1,203 @@
+package validate
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/databricks/databricks-sdk-go/service/jobs"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestJobTaskClusterSpec(t *testing.T) {
+	expectedSummary := "Missing required cluster or environment settings"
+
+	type testCase struct {
+		name         string
+		task         jobs.Task
+		errorPath    string
+		errorDetail  string
+		errorSummary string
+	}
+
+	testCases := []testCase{
+		{
+			name: "valid notebook task",
+			task: jobs.Task{
+				// While a cluster is needed, the notebook environment will be used to create one.
+				NotebookTask: &jobs.NotebookTask{},
+			},
+		},
+		{
+			name: "valid notebook task (job_cluster_key)",
+			task: jobs.Task{
+				JobClusterKey: "cluster1",
+				NotebookTask:  &jobs.NotebookTask{},
+			},
+		},
+		{
+			name: "valid notebook task (new_cluster)",
+			task: jobs.Task{
+				NewCluster:   &compute.ClusterSpec{},
+				NotebookTask: &jobs.NotebookTask{},
+			},
+		},
+		{
+			name: "valid notebook task (existing_cluster_id)",
+			task: jobs.Task{
+				ExistingClusterId: "cluster1",
+				NotebookTask:      &jobs.NotebookTask{},
+			},
+		},
+		{
+			name: "valid SQL notebook task",
+			task: jobs.Task{
+				NotebookTask: &jobs.NotebookTask{
+					WarehouseId: "warehouse1",
+				},
+			},
+		},
+		{
+			name: "valid python wheel task",
+			task: jobs.Task{
+				JobClusterKey:   "cluster1",
+				PythonWheelTask: &jobs.PythonWheelTask{},
+			},
+		},
+		{
+			name: "valid python wheel task (environment_key)",
+			task: jobs.Task{
+				EnvironmentKey:  "environment1",
+				PythonWheelTask: &jobs.PythonWheelTask{},
+			},
+		},
+		{
+			name: "valid dbt task",
+			task: jobs.Task{
+				JobClusterKey: "cluster1",
+				DbtTask:       &jobs.DbtTask{},
+			},
+		},
+		{
+			name: "valid spark jar task",
+			task: jobs.Task{
+				JobClusterKey: "cluster1",
+				SparkJarTask:  &jobs.SparkJarTask{},
+			},
+		},
+		{
+			name: "valid spark submit",
+			task: jobs.Task{
+				NewCluster:      &compute.ClusterSpec{},
+				SparkSubmitTask: &jobs.SparkSubmitTask{},
+			},
+		},
+		{
+			name: "valid spark python task",
+			task: jobs.Task{
+				JobClusterKey:   "cluster1",
+				SparkPythonTask: &jobs.SparkPythonTask{},
+			},
+		},
+		{
+			name: "valid SQL task",
+			task: jobs.Task{
+				SqlTask: &jobs.SqlTask{},
+			},
+		},
+		{
+			name: "valid pipeline task",
+			task: jobs.Task{
+				PipelineTask: &jobs.PipelineTask{},
+			},
+		},
+		{
+			name: "valid run job task",
+			task: jobs.Task{
+				RunJobTask: &jobs.RunJobTask{},
+			},
+		},
+		{
+			name: "valid condition task",
+			task: jobs.Task{
+				ConditionTask: &jobs.ConditionTask{},
+			},
+		},
+		{
+			name: "valid for each task",
+			task: jobs.Task{
+				ForEachTask: &jobs.ForEachTask{
+					Task: jobs.Task{
+						JobClusterKey: "cluster1",
+						NotebookTask:  &jobs.NotebookTask{},
+					},
+				},
+			},
+		},
+		{
+			name: "invalid python wheel task",
+			task: jobs.Task{
+				PythonWheelTask: &jobs.PythonWheelTask{},
+				TaskKey:         "my_task",
+			},
+			errorPath: "resources.jobs.job1.tasks[0]",
+			errorDetail: `Task "my_task" requires a cluster or an environment to run.
+Specify one of the following fields: job_cluster_key, environment_key, existing_cluster_id, new_cluster.`,
+			errorSummary: expectedSummary,
+		},
+		{
+			name: "invalid for each task",
+			task: jobs.Task{
+				ForEachTask: &jobs.ForEachTask{
+					Task: jobs.Task{
+						PythonWheelTask: &jobs.PythonWheelTask{},
+						TaskKey:         "my_task",
+					},
+				},
+			},
+			errorPath: "resources.jobs.job1.tasks[0].for_each_task.task",
+			errorDetail: `Task "my_task" requires a cluster or an environment to run.
+Specify one of the following fields: job_cluster_key, environment_key, existing_cluster_id, new_cluster.`,
+			errorSummary: expectedSummary,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			job := &resources.Job{
+				JobSettings: &jobs.JobSettings{
+					Tasks: []jobs.Task{tc.task},
+				},
+			}
+
+			b := createBundle(map[string]*resources.Job{"job1": job})
+			diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobTaskClusterSpec())
+
+			if tc.errorPath != "" || tc.errorDetail != "" || tc.errorSummary != "" {
+				assert.Len(t, diags, 1)
+				assert.Len(t, diags[0].Paths, 1)
+
+				diag := diags[0]
+
+				assert.Equal(t, tc.errorPath, diag.Paths[0].String())
+				assert.Equal(t, tc.errorSummary, diag.Summary)
+				assert.Equal(t, tc.errorDetail, diag.Detail)
+			} else {
+				assert.ElementsMatch(t, []string{}, diags)
+			}
+		})
+	}
+}
+
+func createBundle(jobs map[string]*resources.Job) *bundle.Bundle {
+	return &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: jobs,
+			},
+		},
+	}
+}
@@ -34,6 +34,7 @@ func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
 		JobClusterKeyDefined(),
 		FilesToSync(),
 		ValidateSyncPatterns(),
+		JobTaskClusterSpec(),
 	))
 }
@@ -47,13 +47,18 @@ type Workspace struct {

 	// Remote workspace base path for deployment state, for artifacts, as synchronization target.
 	// This defaults to "~/.bundle/${bundle.name}/${bundle.target}" where "~" expands to
-	// the current user's home directory in the workspace (e.g. `/Users/jane@doe.com`).
+	// the current user's home directory in the workspace (e.g. `/Workspace/Users/jane@doe.com`).
 	RootPath string `json:"root_path,omitempty"`

 	// Remote workspace path to synchronize local files to.
 	// This defaults to "${workspace.root}/files".
 	FilePath string `json:"file_path,omitempty"`

+	// Remote workspace path for resources with a presence in the workspace.
+	// These are kept outside [FilePath] to avoid potential naming collisions.
+	// This defaults to "${workspace.root}/resources".
+	ResourcePath string `json:"resource_path,omitempty"`
+
 	// Remote workspace path for build artifacts.
 	// This defaults to "${workspace.root}/artifacts".
 	ArtifactPath string `json:"artifact_path,omitempty"`
@@ -2,15 +2,21 @@ package files

 import (
 	"context"
 	"errors"
 	"fmt"
 	"io/fs"

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/permissions"
 	"github.com/databricks/cli/libs/cmdio"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
+	"github.com/databricks/cli/libs/sync"
 )

-type upload struct{}
+type upload struct {
+	outputHandler sync.OutputHandler
+}

 func (m *upload) Name() string {
 	return "files.Upload"

@@ -18,13 +24,23 @@ func (m *upload) Name() string {

 func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	cmdio.LogString(ctx, fmt.Sprintf("Uploading bundle files to %s...", b.Config.Workspace.FilePath))
-	sync, err := GetSync(ctx, bundle.ReadOnly(b))
+	opts, err := GetSyncOptions(ctx, bundle.ReadOnly(b))
 	if err != nil {
 		return diag.FromErr(err)
 	}

+	opts.OutputHandler = m.outputHandler
+	sync, err := sync.New(ctx, *opts)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	defer sync.Close()
+
 	b.Files, err = sync.RunOnce(ctx)
 	if err != nil {
 		if errors.Is(err, fs.ErrPermission) {
 			return permissions.ReportPossiblePermissionDenied(ctx, b, b.Config.Workspace.FilePath)
 		}
 		return diag.FromErr(err)
 	}

@@ -32,6 +48,6 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	return nil
 }

-func Upload() bundle.Mutator {
-	return &upload{}
+func Upload(outputHandler sync.OutputHandler) bundle.Mutator {
+	return &upload{outputHandler}
 }
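With the new signature, callers choose how sync progress is surfaced. A minimal sketch (assumption: passing a nil sync.OutputHandler, the zero value of the field, opts out of progress output):

// Wire the upload mutator without a progress handler (illustrative).
var handler sync.OutputHandler // nil: no output wired up
m := files.Upload(handler)
_ = m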
@@ -3,8 +3,10 @@ package lock

 import (
 	"context"
 	"errors"
+	"io/fs"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/permissions"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/filer"
 	"github.com/databricks/cli/libs/locker"

@@ -51,12 +53,17 @@ func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
 	if err != nil {
 		log.Errorf(ctx, "Failed to acquire deployment lock: %v", err)

+		if errors.Is(err, fs.ErrPermission) {
+			return permissions.ReportPossiblePermissionDenied(ctx, b, b.Config.Workspace.StatePath)
+		}
+
 		notExistsError := filer.NoSuchDirectoryError{}
 		if errors.As(err, &notExistsError) {
 			// If we get a "doesn't exist" error from the API this indicates
 			// we either don't have permissions or the path is invalid.
-			return diag.Errorf("cannot write to deployment root (this can indicate a previous deploy was done with a different identity): %s", b.Config.Workspace.RootPath)
+			return permissions.ReportPossiblePermissionDenied(ctx, b, b.Config.Workspace.StatePath)
 		}

 		return diag.FromErr(err)
 	}
@@ -40,7 +40,7 @@ func (m *compute) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics {
 		// Compute config file path the job is defined in, relative to the bundle
 		// root
 		l := b.Config.GetLocation("resources.jobs." + name)
-		relativePath, err := filepath.Rel(b.RootPath, l.File)
+		relativePath, err := filepath.Rel(b.BundleRootPath, l.File)
 		if err != nil {
 			return diag.Errorf("failed to compute relative path for job %s: %v", name, err)
 		}
@@ -9,6 +9,7 @@ import (
 	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/cli/bundle/internal/bundletest"
 	"github.com/databricks/cli/bundle/metadata"
+	"github.com/databricks/cli/libs/dyn"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

@@ -55,9 +56,9 @@ func TestComputeMetadataMutator(t *testing.T) {
 		},
 	}

-	bundletest.SetLocation(b, "resources.jobs.my-job-1", "a/b/c")
-	bundletest.SetLocation(b, "resources.jobs.my-job-2", "d/e/f")
-	bundletest.SetLocation(b, "resources.pipelines.my-pipeline", "abc")
+	bundletest.SetLocation(b, "resources.jobs.my-job-1", []dyn.Location{{File: "a/b/c"}})
+	bundletest.SetLocation(b, "resources.jobs.my-job-2", []dyn.Location{{File: "d/e/f"}})
+	bundletest.SetLocation(b, "resources.pipelines.my-pipeline", []dyn.Location{{File: "abc"}})

 	expectedMetadata := metadata.Metadata{
 		Version: metadata.Version,
@@ -62,7 +62,7 @@ func testStatePull(t *testing.T, opts statePullOpts) {

 	tmpDir := t.TempDir()
 	b := &bundle.Bundle{
-		RootPath:   tmpDir,
+		BundleRootPath: tmpDir,
 		BundleRoot: vfs.MustNew(tmpDir),

 		SyncRootPath: tmpDir,

@@ -259,7 +259,7 @@ func TestStatePullNoState(t *testing.T) {
 	}}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",

@@ -447,7 +447,7 @@ func TestStatePullNewerDeploymentStateVersion(t *testing.T) {
 	}}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",
@@ -10,6 +10,8 @@ import (
 	"github.com/databricks/cli/libs/log"
 )

+const MaxStateFileSize = 10 * 1024 * 1024 // 10MB
+
 type statePush struct {
 	filerFactory FilerFactory
 }

@@ -35,6 +37,17 @@ func (s *statePush) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
 	}
 	defer local.Close()

+	if !b.Config.Bundle.Force {
+		state, err := local.Stat()
+		if err != nil {
+			return diag.FromErr(err)
+		}
+
+		if state.Size() > MaxStateFileSize {
+			return diag.Errorf("Deployment state file size exceeds the maximum allowed size of %d bytes. Please reduce the number of resources in your bundle, split your bundle into multiple or re-run the command with --force flag.", MaxStateFileSize)
+		}
+	}
+
 	log.Infof(ctx, "Writing local deployment state file to remote state directory")
 	err = f.Write(ctx, DeploymentStateFileName, local, filer.CreateParentDirectories, filer.OverwriteIfExists)
 	if err != nil {
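As a quick sanity check on the limit: 10 * 1024 * 1024 = 10,485,760 bytes, the exact figure asserted later in this diff by TestStatePushLargeState. A one-line verification in Go:

fmt.Println(10 * 1024 * 1024) // prints 10485760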
@@ -45,7 +45,7 @@ func TestStatePush(t *testing.T) {
 	}}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",
@@ -27,7 +27,7 @@ func setupBundleForStateUpdate(t *testing.T) *bundle.Bundle {
 	require.NoError(t, err)

 	return &bundle.Bundle{
-		RootPath: tmpDir,
+		BundleRootPath: tmpDir,
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",
@@ -4,6 +4,7 @@ import (
 	"context"

 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/permissions"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
 	"github.com/hashicorp/terraform-exec/tfexec"

@@ -34,6 +35,10 @@ func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	// Apply terraform according to the computed plan
 	err := tf.Apply(ctx, tfexec.DirOrPlan(b.Plan.Path))
 	if err != nil {
+		diags := permissions.TryExtendTerraformPermissionError(ctx, b, err)
+		if diags != nil {
+			return diags
+		}
 		return diag.Errorf("terraform apply: %v", err)
 	}
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"sort"

 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"

@@ -82,6 +83,10 @@ func BundleToTerraform(config *config.Root) *schema.Root {
 		conv(src, &dst)

 		if src.JobSettings != nil {
+			sort.Slice(src.JobSettings.Tasks, func(i, j int) bool {
+				return src.JobSettings.Tasks[i].TaskKey < src.JobSettings.Tasks[j].TaskKey
+			})
+
 			for _, v := range src.Tasks {
 				var t schema.ResourceJobTask
 				conv(v, &t)

@@ -231,6 +236,13 @@ func BundleToTerraform(config *config.Root) *schema.Root {
 		tfroot.Resource.QualityMonitor[k] = &dst
 	}

+	for k, src := range config.Resources.Clusters {
+		noResources = false
+		var dst schema.ResourceCluster
+		conv(src, &dst)
+		tfroot.Resource.Cluster[k] = &dst
+	}
+
 	// We explicitly set "resource" to nil to omit it from a JSON encoding.
 	// This is required because the terraform CLI requires >= 1 resources defined
 	// if the "resource" property is used in a .tf.json file.

@@ -404,6 +416,16 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
 			}
 			cur.ID = instance.Attributes.ID
 			config.Resources.Volumes[resource.Name] = cur
+		case "databricks_cluster":
+			if config.Resources.Clusters == nil {
+				config.Resources.Clusters = make(map[string]*resources.Cluster)
+			}
+			cur := config.Resources.Clusters[resource.Name]
+			if cur == nil {
+				cur = &resources.Cluster{ModifiedStatus: resources.ModifiedStatusDeleted}
+			}
+			cur.ID = instance.Attributes.ID
+			config.Resources.Clusters[resource.Name] = cur
 		case "databricks_permissions":
 		case "databricks_grants":
 			// Ignore; no need to pull these back into the configuration.

@@ -458,6 +480,11 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
 			src.ModifiedStatus = resources.ModifiedStatusCreated
 		}
 	}
+	for _, src := range config.Resources.Clusters {
+		if src.ModifiedStatus == "" && src.ID == "" {
+			src.ModifiedStatus = resources.ModifiedStatusCreated
+		}
+	}

 	return nil
 }
@@ -671,6 +671,14 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
 				{Attributes: stateInstanceAttributes{ID: "1"}},
 			},
 		},
+		{
+			Type: "databricks_cluster",
+			Mode: "managed",
+			Name: "test_cluster",
+			Instances: []stateResourceInstance{
+				{Attributes: stateInstanceAttributes{ID: "1"}},
+			},
+		},
 	},
 }
 err := TerraformToBundle(&tfState, &config)

@@ -703,6 +711,9 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
 	assert.Equal(t, "1", config.Resources.Volumes["test_volume"].ID)
 	assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Volumes["test_volume"].ModifiedStatus)

+	assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID)
+	assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Clusters["test_cluster"].ModifiedStatus)
+
 	AssertFullResourceCoverage(t, &config)
 }

@@ -772,6 +783,13 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
 				},
 			},
 		},
+		Clusters: map[string]*resources.Cluster{
+			"test_cluster": {
+				ClusterSpec: &compute.ClusterSpec{
+					ClusterName: "test_cluster",
+				},
+			},
+		},
 	},
 }
 var tfState = resourcesState{

@@ -807,6 +825,9 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
 	assert.Equal(t, "", config.Resources.Volumes["test_volume"].ID)
 	assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Volumes["test_volume"].ModifiedStatus)

+	assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ID)
+	assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster"].ModifiedStatus)
+
 	AssertFullResourceCoverage(t, &config)
 }

@@ -921,6 +942,18 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
 				},
 			},
 		},
+		Clusters: map[string]*resources.Cluster{
+			"test_cluster": {
+				ClusterSpec: &compute.ClusterSpec{
+					ClusterName: "test_cluster",
+				},
+			},
+			"test_cluster_new": {
+				ClusterSpec: &compute.ClusterSpec{
+					ClusterName: "test_cluster_new",
+				},
+			},
+		},
 	},
 }
 var tfState = resourcesState{

@@ -1061,6 +1094,14 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
 				{Attributes: stateInstanceAttributes{ID: "1"}},
 			},
 		},
+		{
+			Type: "databricks_cluster",
+			Mode: "managed",
+			Name: "test_cluster",
+			Instances: []stateResourceInstance{
+				{Attributes: stateInstanceAttributes{ID: "1"}},
+			},
+		},
 		{
 			Type: "databricks_volume",
 			Mode: "managed",

@@ -1069,6 +1110,13 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
 				{Attributes: stateInstanceAttributes{ID: "2"}},
 			},
 		},
+		{
+			Type: "databricks_cluster",
+			Mode: "managed",
+			Name: "test_cluster_old",
+			Instances: []stateResourceInstance{
+				{Attributes: stateInstanceAttributes{ID: "2"}},
+			},
+		},
 	},
 }
 err := TerraformToBundle(&tfState, &config)

@@ -1137,6 +1185,13 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
 	assert.Equal(t, "", config.Resources.Volumes["test_volume_new"].ID)
 	assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Volumes["test_volume_new"].ModifiedStatus)

+	assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID)
+	assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ModifiedStatus)
+	assert.Equal(t, "2", config.Resources.Clusters["test_cluster_old"].ID)
+	assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Clusters["test_cluster_old"].ModifiedStatus)
+	assert.Equal(t, "", config.Resources.Clusters["test_cluster_new"].ID)
+	assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster_new"].ModifiedStatus)
+
 	AssertFullResourceCoverage(t, &config)
 }
@@ -33,7 +33,7 @@ func TestInitEnvironmentVariables(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -60,7 +60,7 @@ func TestSetTempDirEnvVarsForUnixWithTmpDirSet(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -88,7 +88,7 @@ func TestSetTempDirEnvVarsForUnixWithTmpDirNotSet(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -114,7 +114,7 @@ func TestSetTempDirEnvVarsForWindowWithAllTmpDirEnvVarsSet(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -144,7 +144,7 @@ func TestSetTempDirEnvVarsForWindowWithUserProfileAndTempSet(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -174,7 +174,7 @@ func TestSetTempDirEnvVarsForWindowsWithoutAnyTempDirEnvVarsSet(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -202,7 +202,7 @@ func TestSetTempDirEnvVarsForWindowsWithoutAnyTempDirEnvVarsSet(t *testing.T) {

 func TestSetProxyEnvVars(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -250,7 +250,7 @@ func TestSetProxyEnvVars(t *testing.T) {

 func TestSetUserAgentExtraEnvVar(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Experimental: &config.Experimental{
 				PyDABs: config.PyDABs{

@@ -333,7 +333,7 @@ func TestFindExecPathFromEnvironmentWithWrongVersion(t *testing.T) {
 	ctx := context.Background()
 	m := &initialize{}
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -357,7 +357,7 @@ func TestFindExecPathFromEnvironmentWithCorrectVersionAndNoBinary(t *testing.T)
 	ctx := context.Background()
 	m := &initialize{}
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -380,7 +380,7 @@ func TestFindExecPathFromEnvironmentWithCorrectVersionAndBinary(t *testing.T) {
 	ctx := context.Background()
 	m := &initialize{}
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -60,6 +60,8 @@ func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
 			path = dyn.NewPath(dyn.Key("databricks_schema")).Append(path[2:]...)
 		case dyn.Key("volumes"):
 			path = dyn.NewPath(dyn.Key("databricks_volume")).Append(path[2:]...)
+		case dyn.Key("clusters"):
+			path = dyn.NewPath(dyn.Key("databricks_cluster")).Append(path[2:]...)
 		default:
 			// Trigger "key not found" for unknown resource types.
 			return dyn.GetByPath(root, path)
@@ -32,6 +32,7 @@ func TestInterpolate(t *testing.T) {
 				"other_registered_model": "${resources.registered_models.other_registered_model.id}",
 				"other_schema":           "${resources.schemas.other_schema.id}",
 				"other_volume":           "${resources.volumes.other_volume.id}",
+				"other_cluster":          "${resources.clusters.other_cluster.id}",
 			},
 			Tasks: []jobs.Task{
 				{

@@ -69,6 +70,7 @@ func TestInterpolate(t *testing.T) {
 	assert.Equal(t, "${databricks_registered_model.other_registered_model.id}", j.Tags["other_registered_model"])
 	assert.Equal(t, "${databricks_schema.other_schema.id}", j.Tags["other_schema"])
 	assert.Equal(t, "${databricks_volume.other_volume.id}", j.Tags["other_volume"])
+	assert.Equal(t, "${databricks_cluster.other_cluster.id}", j.Tags["other_cluster"])

 	m := b.Config.Resources.Models["my_model"]
 	assert.Equal(t, "my_model", m.Model.Name)
@@ -17,7 +17,7 @@ func TestLoadWithNoState(t *testing.T) {
 	}

 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
@@ -32,7 +32,7 @@ func mockStateFilerForPull(t *testing.T, contents map[string]any, merr error) filer.Filer {

 func statePullTestBundle(t *testing.T) *bundle.Bundle {
 	return &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",
@@ -47,6 +47,17 @@ func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics
 	}
 	defer local.Close()

+	if !b.Config.Bundle.Force {
+		state, err := local.Stat()
+		if err != nil {
+			return diag.FromErr(err)
+		}
+
+		if state.Size() > deploy.MaxStateFileSize {
+			return diag.Errorf("Terraform state file size exceeds the maximum allowed size of %d bytes. Please reduce the number of resources in your bundle, split your bundle into multiple or re-run the command with --force flag", deploy.MaxStateFileSize)
+		}
+	}
+
 	// Upload state file from local cache directory to filer.
 	cmdio.LogString(ctx, "Updating deployment state...")
 	log.Infof(ctx, "Writing local state file to remote state directory")
@@ -3,6 +3,7 @@ package terraform

 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"io"
 	"testing"

@@ -29,7 +30,7 @@ func mockStateFilerForPush(t *testing.T, fn func(body io.Reader)) filer.Filer {

 func statePushTestBundle(t *testing.T) *bundle.Bundle {
 	return &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "default",

@@ -59,3 +60,29 @@ func TestStatePush(t *testing.T) {
 	diags := bundle.Apply(ctx, b, m)
 	assert.NoError(t, diags.Error())
 }
+
+func TestStatePushLargeState(t *testing.T) {
+	mock := mockfiler.NewMockFiler(t)
+	m := &statePush{
+		identityFiler(mock),
+	}
+
+	ctx := context.Background()
+	b := statePushTestBundle(t)
+
+	largeState := map[string]any{}
+	for i := 0; i < 1000000; i++ {
+		largeState[fmt.Sprintf("field_%d", i)] = i
+	}
+
+	// Write a large local state file.
+	writeLocalState(t, ctx, b, largeState)
+	diags := bundle.Apply(ctx, b, m)
+	assert.ErrorContains(t, diags.Error(), "Terraform state file size exceeds the maximum allowed size of 10485760 bytes. Please reduce the number of resources in your bundle, split your bundle into multiple or re-run the command with --force flag")
+
+	// Force the write.
+	b = statePushTestBundle(t)
+	b.Config.Bundle.Force = true
+	diags = bundle.Apply(ctx, b, m)
+	assert.NoError(t, diags.Error())
+}
@@ -0,0 +1,52 @@
+package tfdyn
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/databricks/cli/bundle/internal/tf/schema"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/cli/libs/dyn/convert"
+	"github.com/databricks/cli/libs/log"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+)
+
+func convertClusterResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) {
+	// Normalize the output value to the target schema.
+	vout, diags := convert.Normalize(compute.ClusterSpec{}, vin)
+	for _, diag := range diags {
+		log.Debugf(ctx, "cluster normalization diagnostic: %s", diag.Summary)
+	}
+
+	return vout, nil
+}
+
+type clusterConverter struct{}
+
+func (clusterConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error {
+	vout, err := convertClusterResource(ctx, vin)
+	if err != nil {
+		return err
+	}
+
+	// We always set no_wait as it allows DABs not to wait for cluster to be started.
+	vout, err = dyn.Set(vout, "no_wait", dyn.V(true))
+	if err != nil {
+		return err
+	}
+
+	// Add the converted resource to the output.
+	out.Cluster[key] = vout.AsAny()
+
+	// Configure permissions for this resource.
+	if permissions := convertPermissionsResource(ctx, vin); permissions != nil {
+		permissions.ClusterId = fmt.Sprintf("${databricks_cluster.%s.id}", key)
+		out.Permissions["cluster_"+key] = permissions
+	}
+
+	return nil
+}
+
+func init() {
+	registerConverter("clusters", clusterConverter{})
+}
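The converter is exercised directly by the test file that follows; the call shape, grounded in that test, is:

ctx := context.Background()
out := schema.NewResources()
vin, err := convert.FromTyped(src, dyn.NilValue) // src is a resources.Cluster
if err == nil {
	err = clusterConverter{}.Convert(ctx, "my_cluster", vin, out)
}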
@@ -0,0 +1,97 @@
+package tfdyn
+
+import (
+	"context"
+	"testing"
+
+	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/bundle/internal/tf/schema"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/cli/libs/dyn/convert"
+	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestConvertCluster(t *testing.T) {
+	var src = resources.Cluster{
+		ClusterSpec: &compute.ClusterSpec{
+			NumWorkers:   3,
+			SparkVersion: "13.3.x-scala2.12",
+			ClusterName:  "cluster",
+			SparkConf: map[string]string{
+				"spark.executor.memory": "2g",
+			},
+			AwsAttributes: &compute.AwsAttributes{
+				Availability: "ON_DEMAND",
+			},
+			AzureAttributes: &compute.AzureAttributes{
+				Availability: "SPOT",
+			},
+			DataSecurityMode: "USER_ISOLATION",
+			NodeTypeId:       "m5.xlarge",
+			Autoscale: &compute.AutoScale{
+				MinWorkers: 1,
+				MaxWorkers: 10,
+			},
+		},
+
+		Permissions: []resources.Permission{
+			{
+				Level:    "CAN_RUN",
+				UserName: "jack@gmail.com",
+			},
+			{
+				Level:                "CAN_MANAGE",
+				ServicePrincipalName: "sp",
+			},
+		},
+	}
+
+	vin, err := convert.FromTyped(src, dyn.NilValue)
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	out := schema.NewResources()
+	err = clusterConverter{}.Convert(ctx, "my_cluster", vin, out)
+	require.NoError(t, err)
+
+	cluster := out.Cluster["my_cluster"]
+	assert.Equal(t, map[string]any{
+		"num_workers":   int64(3),
+		"spark_version": "13.3.x-scala2.12",
+		"cluster_name":  "cluster",
+		"spark_conf": map[string]any{
+			"spark.executor.memory": "2g",
+		},
+		"aws_attributes": map[string]any{
+			"availability": "ON_DEMAND",
+		},
+		"azure_attributes": map[string]any{
+			"availability": "SPOT",
+		},
+		"data_security_mode": "USER_ISOLATION",
+		"no_wait":            true,
+		"node_type_id":       "m5.xlarge",
+		"autoscale": map[string]any{
+			"min_workers": int64(1),
+			"max_workers": int64(10),
+		},
+	}, cluster)
+
+	// Assert equality on the permissions
+	assert.Equal(t, &schema.ResourcePermissions{
+		ClusterId: "${databricks_cluster.my_cluster.id}",
+		AccessControl: []schema.ResourcePermissionsAccessControl{
+			{
+				PermissionLevel: "CAN_RUN",
+				UserName:        "jack@gmail.com",
+			},
+			{
+				PermissionLevel:      "CAN_MANAGE",
+				ServicePrincipalName: "sp",
+			},
+		},
+	}, out.Permissions["cluster_my_cluster"])
+}
@@ -3,6 +3,7 @@ package tfdyn

 import (
 	"context"
 	"fmt"
+	"sort"

 	"github.com/databricks/cli/bundle/internal/tf/schema"
 	"github.com/databricks/cli/libs/dyn"

@@ -19,8 +20,38 @@ func convertJobResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) {
 		log.Debugf(ctx, "job normalization diagnostic: %s", diag.Summary)
 	}

+	// Sort the tasks of each job in the bundle by task key. Sorting
+	// the task keys ensures that the diff computed by terraform is correct and avoids
+	// recreates. For more details see the NOTE at
+	// https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/job#example-usage
+	// and https://github.com/databricks/terraform-provider-databricks/issues/4011
+	// and https://github.com/databricks/cli/pull/1776
+	vout := vin
+	var err error
+	tasks, ok := vin.Get("tasks").AsSequence()
+	if ok {
+		sort.Slice(tasks, func(i, j int) bool {
+			// We sort the tasks by their task key. Tasks without task keys are ordered
+			// before tasks with task keys. We do not error for those tasks
+			// since the presence of a task_key is validated in the Jobs backend.
+			tk1, ok := tasks[i].Get("task_key").AsString()
+			if !ok {
+				return true
+			}
+			tk2, ok := tasks[j].Get("task_key").AsString()
+			if !ok {
+				return false
+			}
+			return tk1 < tk2
+		})
+		vout, err = dyn.Set(vin, "tasks", dyn.V(tasks))
+		if err != nil {
+			return dyn.InvalidValue, err
+		}
+	}
+
 	// Modify top-level keys.
-	vout, err := renameKeys(vin, map[string]string{
+	vout, err = renameKeys(vout, map[string]string{
 		"tasks":        "task",
 		"job_clusters": "job_cluster",
 		"parameters":   "parameter",
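One subtlety, noted here as an aside: sort.Slice is not stable, and the comparator above returns true whenever the left task lacks a task_key, so the relative order of several keyless tasks is unspecified. For comparison, a standalone sketch with a well-formed comparator (empties first, stable among themselves):

package main

import (
	"fmt"
	"sort"
)

func main() {
	keys := []string{"b", "", "a", ""}
	// Empty keys sort before non-empty ones; two empties compare as
	// "equal", so SliceStable preserves their input order.
	sort.SliceStable(keys, func(i, j int) bool {
		if keys[i] == "" || keys[j] == "" {
			return keys[i] == "" && keys[j] != ""
		}
		return keys[i] < keys[j]
	})
	fmt.Printf("%q\n", keys) // ["" "" "a" "b"]
}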
@@ -42,8 +42,8 @@ func TestConvertJob(t *testing.T) {
 			},
 			Tasks: []jobs.Task{
 				{
-					TaskKey:       "task_key",
-					JobClusterKey: "job_cluster_key",
+					TaskKey:       "task_key_b",
+					JobClusterKey: "job_cluster_key_b",
 					Libraries: []compute.Library{
 						{
 							Pypi: &compute.PythonPyPiLibrary{

@@ -55,6 +55,17 @@ func TestConvertJob(t *testing.T) {
 					},
 				},
+				{
+					TaskKey:       "task_key_a",
+					JobClusterKey: "job_cluster_key_a",
+				},
+				{
+					TaskKey:       "task_key_c",
+					JobClusterKey: "job_cluster_key_c",
+				},
+				{
+					Description: "missing task key 😱",
+				},
 			},
 		},
 		Permissions: []resources.Permission{

@@ -100,8 +111,15 @@ func TestConvertJob(t *testing.T) {
 		},
 		"task": []any{
 			map[string]any{
-				"task_key":        "task_key",
-				"job_cluster_key": "job_cluster_key",
+				"description": "missing task key 😱",
+			},
+			map[string]any{
+				"task_key":        "task_key_a",
+				"job_cluster_key": "job_cluster_key_a",
+			},
+			map[string]any{
+				"task_key":        "task_key_b",
+				"job_cluster_key": "job_cluster_key_b",
 				"library": []any{
 					map[string]any{
 						"pypi": map[string]any{

@@ -113,6 +131,10 @@ func TestConvertJob(t *testing.T) {
 					},
 				},
 			},
+			map[string]any{
+				"task_key":        "task_key_c",
+				"job_cluster_key": "job_cluster_key_c",
+			},
 		},
 	}, out.Job["my_job"])
@@ -13,7 +13,7 @@ import (

 func TestParseResourcesStateWithNoFile(t *testing.T) {
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",

@@ -31,7 +31,7 @@ func TestParseResourcesStateWithNoFile(t *testing.T) {
 func TestParseResourcesStateWithExistingStateFile(t *testing.T) {
 	ctx := context.Background()
 	b := &bundle.Bundle{
-		RootPath: t.TempDir(),
+		BundleRootPath: t.TempDir(),
 		Config: config.Root{
 			Bundle: config.Bundle{
 				Target: "whatever",
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue